Commit 69a7b300 authored by JOKER0810

Update LogIn.ui, zzf.jpg, hand_writing.py, LogIn.py, mainFunction.py, preFunction.py, recognize.py, recognize_character.qrc, recognize_character.py
LogIn.py

# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'LogIn.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_LogIn(object):
def setupUi(self, LogIn):
LogIn.setObjectName("LogIn")
LogIn.resize(1231, 678)
self.centralwidget = QtWidgets.QWidget(LogIn)
self.centralwidget.setStyleSheet("QWidget#centralwidget{background:rgb(255, 255, 255)}")
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_cap = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(3)
sizePolicy.setHeightForWidth(self.label_cap.sizePolicy().hasHeightForWidth())
self.label_cap.setSizePolicy(sizePolicy)
self.label_cap.setMinimumSize(QtCore.QSize(640, 480))
self.label_cap.setText("")
self.label_cap.setObjectName("label_cap")
self.horizontalLayout.addWidget(self.label_cap)
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setObjectName("verticalLayout")
self.label_2 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(17)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2, 0, QtCore.Qt.AlignHCenter)
self.lineEdit = QtWidgets.QLineEdit(self.widget)
self.lineEdit.setMinimumSize(QtCore.QSize(0, 80))
font = QtGui.QFont()
font.setPointSize(24)
self.lineEdit.setFont(font)
self.lineEdit.setObjectName("lineEdit")
self.verticalLayout.addWidget(self.lineEdit)
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setMinimumSize(QtCore.QSize(100, 120))
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_ok = QtWidgets.QPushButton(self.widget_2)
self.pushButton_ok.setMinimumSize(QtCore.QSize(0, 100))
self.pushButton_ok.setObjectName("pushButton_ok")
self.horizontalLayout_2.addWidget(self.pushButton_ok)
self.pushButton_delete = QtWidgets.QPushButton(self.widget_2)
self.pushButton_delete.setMinimumSize(QtCore.QSize(0, 100))
self.pushButton_delete.setObjectName("pushButton_delete")
self.horizontalLayout_2.addWidget(self.pushButton_delete)
self.verticalLayout.addWidget(self.widget_2)
self.label_picture = QtWidgets.QLabel(self.widget)
self.label_picture.setMinimumSize(QtCore.QSize(540, 360))
font = QtGui.QFont()
font.setPointSize(13)
self.label_picture.setFont(font)
self.label_picture.setStyleSheet("border-image:url(:/decorate/zzf.jpg)")
self.label_picture.setText("")
self.label_picture.setObjectName("label_picture")
self.verticalLayout.addWidget(self.label_picture)
self.horizontalLayout.addWidget(self.widget)
LogIn.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(LogIn)
self.statusbar.setObjectName("statusbar")
LogIn.setStatusBar(self.statusbar)
self.retranslateUi(LogIn)
QtCore.QMetaObject.connectSlotsByName(LogIn)
def retranslateUi(self, LogIn):
_translate = QtCore.QCoreApplication.translate
LogIn.setWindowTitle(_translate("LogIn", "MainWindow"))
self.label_2.setText(_translate("LogIn", "请输入密码"))
self.pushButton_ok.setText(_translate("LogIn", "确定"))
self.pushButton_delete.setText(_translate("LogIn", "删除"))
LogIn.ui

<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>LogIn</class>
<widget class="QMainWindow" name="LogIn">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>1231</width>
<height>678</height>
</rect>
</property>
<property name="windowTitle">
<string>MainWindow</string>
</property>
<widget class="QWidget" name="centralwidget">
<property name="styleSheet">
<string notr="true">QWidget#centralwidget{background:rgb(255, 255, 255)}</string>
</property>
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="QLabel" name="label_cap">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>4</horstretch>
<verstretch>3</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>640</width>
<height>480</height>
</size>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
<item>
<widget class="QWidget" name="widget" native="true">
<layout class="QVBoxLayout" name="verticalLayout">
<item alignment="Qt::AlignHCenter">
<widget class="QLabel" name="label_2">
<property name="font">
<font>
<pointsize>17</pointsize>
</font>
</property>
<property name="text">
<string>请输入密码</string>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="lineEdit">
<property name="minimumSize">
<size>
<width>0</width>
<height>80</height>
</size>
</property>
<property name="font">
<font>
<pointsize>24</pointsize>
</font>
</property>
</widget>
</item>
<item>
<widget class="QWidget" name="widget_2" native="true">
<property name="minimumSize">
<size>
<width>100</width>
<height>120</height>
</size>
</property>
<layout class="QHBoxLayout" name="horizontalLayout_2">
<item>
<widget class="QPushButton" name="pushButton_ok">
<property name="minimumSize">
<size>
<width>0</width>
<height>100</height>
</size>
</property>
<property name="text">
<string>确定</string>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="pushButton_delete">
<property name="minimumSize">
<size>
<width>0</width>
<height>100</height>
</size>
</property>
<property name="text">
<string>删除</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
<widget class="QLabel" name="label_picture">
<property name="minimumSize">
<size>
<width>540</width>
<height>360</height>
</size>
</property>
<property name="font">
<font>
<pointsize>13</pointsize>
</font>
</property>
<property name="styleSheet">
<string notr="true">border-image:url(:/decorate/zzf.jpg)</string>
</property>
<property name="text">
<string/>
</property>
</widget>
</item>
</layout>
</widget>
</item>
</layout>
</widget>
<widget class="QStatusBar" name="statusbar"/>
</widget>
<resources>
<include location="recognize_character.qrc"/>
</resources>
<connections/>
</ui>
hand_writing.py

import cv2
import mediapipe as mp
import numpy as np
import autopy
import math
import matplotlib.pyplot as plt
def index_thumb_pt(img,result):
h, w = img.shape[0], img.shape[1]
hand_dic = {}
hand_21 = result.multi_hand_landmarks[0]
thumb_x = hand_21.landmark[4].x * w
thumb_y = hand_21.landmark[4].y * h
index_x = hand_21.landmark[8].x * w
index_y = hand_21.landmark[8].y * h
choose_pt = (int((thumb_x + index_x)/2),int((thumb_y + index_y)/2))
dst = np.sqrt(np.square(thumb_x - index_x) + np.square(thumb_y - index_y))
click_state = False
if dst < 30 and hand_21.landmark[3].x < hand_21.landmark[12].x < hand_21.landmark[17].x and \
hand_21.landmark[3].x < hand_21.landmark[16].x < hand_21.landmark[17].x and \
hand_21.landmark[3].x < hand_21.landmark[20].x < hand_21.landmark[17].x and \
hand_21.landmark[6].y < hand_21.landmark[20].y < hand_21.landmark[0].y and \
hand_21.landmark[6].y < hand_21.landmark[16].y < hand_21.landmark[0].y and \
hand_21.landmark[6].y < hand_21.landmark[12].y < hand_21.landmark[0].y:
click_state = True
    cv2.circle(img, choose_pt, 10, (0, 0, 255), -1)  # draw the pinch point; its coordinates feed the stroke trajectory
cv2.circle(img, choose_pt, 5, (255, 220, 30), -1)
hand_dic['pt'] = choose_pt
hand_dic['click'] = click_state
return img, hand_dic
def p_to_p_distance(p1,p2):
return np.sqrt(np.square(p1[0] - p2[0]) + np.square(p1[1] - p2[1]))
def hand_point(result, h, w):
    # collect the pixel coordinates of all 21 hand landmarks
    handpoint_list = []
    hand_21 = result.multi_hand_landmarks[0]
    for i in range(21):
        handpoint_list.append([int(hand_21.landmark[i].x * w), int(hand_21.landmark[i].y * h)])
    return handpoint_list
def judge_handpose(handpoint_list):
if handpoint_list[4][1] < handpoint_list[3][1] and p_to_p_distance(handpoint_list[8],handpoint_list[5]) < 50 and \
p_to_p_distance(handpoint_list[12],handpoint_list[9]) < 50 and p_to_p_distance(handpoint_list[16],handpoint_list[13]) < 50 and \
p_to_p_distance(handpoint_list[20],handpoint_list[17]) < 50 and \
abs(handpoint_list[4][0] - handpoint_list[3][0]) < 5 and abs(handpoint_list[5][0] - handpoint_list[17][0]) < 5:
return 'Thumb_up'
elif handpoint_list[8][1] < handpoint_list[7][1] < handpoint_list[6][1] and \
handpoint_list[10][1] < handpoint_list[11][1] < handpoint_list[12][1] and \
handpoint_list[14][1] < handpoint_list[15][1] < handpoint_list[16][1] and \
handpoint_list[18][1] < handpoint_list[19][1] < handpoint_list[20][1] and \
p_to_p_distance(handpoint_list[4],handpoint_list[12])< 50:
return 'Index_up'
elif handpoint_list[12][1] < handpoint_list[11][1] < handpoint_list[10][1] and \
handpoint_list[8][1] < handpoint_list[7][1] < handpoint_list[6][1] and \
handpoint_list[14][1] < handpoint_list[15][1] < handpoint_list[16][1] and \
handpoint_list[18][1] < handpoint_list[19][1] < handpoint_list[20][1] and \
p_to_p_distance(handpoint_list[4],handpoint_list[16]) < 50:
return 'Index_middle_up'
elif handpoint_list[20][1] < handpoint_list[19][1] < handpoint_list[18][1] and \
handpoint_list[10][1] < handpoint_list[11][1] < handpoint_list[12][1] and \
handpoint_list[14][1] < handpoint_list[15][1] < handpoint_list[16][1] and \
handpoint_list[6][1] < handpoint_list[7][1] < handpoint_list[8][1] and \
p_to_p_distance(handpoint_list[4],handpoint_list[12]) < 50:
return 'Pinky_up'
elif p_to_p_distance(handpoint_list[8],handpoint_list[12]) < 40 and \
p_to_p_distance(handpoint_list[12],handpoint_list[16]) < 40 and \
p_to_p_distance(handpoint_list[16],handpoint_list[20]) < 40 and \
p_to_p_distance(handpoint_list[4],handpoint_list[8]) < 40:
return 'Fingers_together'
elif p_to_p_distance(handpoint_list[4],handpoint_list[0]) > p_to_p_distance(handpoint_list[5],handpoint_list[0]) and \
p_to_p_distance(handpoint_list[8],handpoint_list[5]) > 4 * p_to_p_distance(handpoint_list[5],handpoint_list[9]) and \
p_to_p_distance(handpoint_list[12],handpoint_list[9]) > 4 * p_to_p_distance(handpoint_list[5],handpoint_list[9]) and \
p_to_p_distance(handpoint_list[16],handpoint_list[13]) > 4 * p_to_p_distance(handpoint_list[5],handpoint_list[9]) and \
p_to_p_distance(handpoint_list[20], handpoint_list[17]) > 3 * p_to_p_distance(handpoint_list[5],handpoint_list[9]):
return 'GiveMeFive'
else:
return None
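# The pose names returned above map to actions in the main loop below:
# 'Thumb_up' saves the canvas and clears it, 'Index_up' moves the mouse,
# 'Index_middle_up' clicks, 'Pinky_up' erases, and 'Fingers_together' /
# 'GiveMeFive' belong to the commented-out contour-fill and transform experiments.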
def draw_character(gesture_lines,img):
if len(gesture_lines) >= 2:
pt1 = gesture_lines[-1]
pt2 = gesture_lines[-2]
if np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) < 30:
cv2.line(img, pt1, pt2, (0,0,255), thickness=6)
def according_area_sort(elem):
return cv2.contourArea(elem)
def flit_sort_area_contours(contours, n):
    # cv2.findContours may return a tuple, which has no sort(); sort a copy instead
    contours = sorted(contours, key=according_area_sort, reverse=True)
    return contours[:n]
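# A minimal, hypothetical usage sketch for the helper above (the 'mask'
# array is illustrative, not part of the original file):
#
#     mask = np.zeros((480, 640), dtype='uint8')
#     cv2.circle(mask, (320, 240), 60, 255, -1)
#     cv2.circle(mask, (100, 100), 20, 255, -1)
#     contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
#     largest = flit_sort_area_contours(contours, 1)  # keeps only the biggest blob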
def get_angle_plot(line1, line2):
l1xy = line1.get_xydata()
# Angle between line1 and x-axis
slope1 = (l1xy[1][1] - l1xy[0][1]) / float(l1xy[1][0] - l1xy[0][0])
angle1 = abs(math.degrees(math.atan(slope1))) # Taking only the positive angle
l2xy = line2.get_xydata()
# Angle between line2 and x-axis
slope2 = (l2xy[1][1] - l2xy[0][1]) / float(l2xy[1][0] - l2xy[0][0])
angle2 = abs(math.degrees(math.atan(slope2)))
theta1 = min(angle1, angle2)
theta2 = max(angle1, angle2)
angle = theta2 - theta1
return angle
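# A small, hypothetical worked example for get_angle_plot (not part of the
# original file). The function takes the atan of each slope, so every angle
# is folded into [0, 90] degrees, and a vertical line would divide by zero:
#
#     line_a = plt.plot((0, 1), (0, 1))[0]   # slope 1 -> 45.0 degrees
#     line_b = plt.plot((0, 1), (0, 0))[0]   # slope 0 ->  0.0 degrees
#     print(get_angle_plot(line_a, line_b))  # 45.0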
if __name__ == '__main__':
wScr, hScr = autopy.screen.size()
wCam, hCam = 640, 480
smoothening = 7
initial_length = 100
plocX, plocY = 0, 0
clocX, clocY = 0, 0
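    # The cursor smoothing below is an exponential moving average: each frame
    # the cursor moves 1/smoothening of the way from its previous location
    # (plocX, plocY) toward the fingertip target:
    # cloc = ploc + (target - ploc) / smoothening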
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False,
max_num_hands=2,
min_detection_confidence=0.7,
min_tracking_confidence=0.5)
mpDraw = mp.solutions.drawing_utils
gesture_lines = []
cap = cv2.VideoCapture(0)
cap.open(0)
img_black = np.zeros((hCam,wCam,3),dtype='uint8')
while cap.isOpened():
ret, frame = cap.read()
        if not ret:
            print('error')
            break
h, w, c = frame.shape[0], frame.shape[1],frame.shape[2]
# print(h,w,c)
frame = cv2.flip(frame, 1)
img_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = hands.process(img_RGB)
if results.multi_hand_landmarks:
mpDraw.draw_landmarks(frame,results.multi_hand_landmarks[0],mp_hands.HAND_CONNECTIONS)
handpoint_list = hand_point(results,h,w)
hand_pose = judge_handpose(handpoint_list)
if hand_pose == 'Thumb_up' and len(gesture_lines) > 10:
cv2.imwrite('picture/character.jpg', img_black)
gesture_lines = []
img_black = np.zeros((h, w, c),dtype='uint8')
elif hand_pose =='Index_up':
index_x,index_y = handpoint_list[8]
screen_x = np.interp(index_x, (0, wCam),(0, wScr))
screen_y = np.interp(index_y, (0, hCam), (0, hScr))
clocX = plocX + (screen_x - plocX) / smoothening
clocY = plocY + (screen_y - plocY) / smoothening
autopy.mouse.move(clocX,clocY)
cv2.circle(frame,(index_x,index_y),10,(255,0,255),cv2.FILLED)
plocX, plocY = clocX, clocY
elif hand_pose == 'Index_middle_up':
if p_to_p_distance(handpoint_list[8],handpoint_list[12]) < 50:
index_x, index_y = handpoint_list[8]
middle_x, middle_y = handpoint_list[12]
click_x, click_y = int((index_x + middle_x)/2), int((index_y + middle_y)/2)
cv2.circle(frame, (click_x, click_y), 10, (0, 255, 0), cv2.FILLED)
autopy.mouse.click()
elif hand_pose == 'Pinky_up':
pinky_x, pinky_y = handpoint_list[20]
cv2.circle(frame, (pinky_x, pinky_y), 15, (0, 255, 0), cv2.FILLED)
cv2.circle(img_black, (pinky_x, pinky_y), 15, (0, 0, 0), cv2.FILLED)
# elif hand_pose == 'Fingers_together':
# gray = cv2.cvtColor(img_black,cv2.COLOR_BGR2GRAY)
# _, imgBinary = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
# contours,_ = cv2.findContours(imgBinary,cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# if contours :
# contours_sorted = flit_sort_area_contours(contours,1)
# img_black = cv2.drawContours(img_black,contours_sorted,-1,(0, 0, 255),cv2.FILLED)
# elif hand_pose == 'GiveMeFive':
# img_black_copy = img_black.copy()
# x1 = (handpoint_list[8][0], handpoint_list[5][0])
# y1 = (handpoint_list[8][1], handpoint_list[5][1])
# line1 = plt.plot(x1, y1)
# x2 = (100, 100)
# y2 = (100, 200)
# line2 = plt.plot(x2, y2)
# angle = get_angle_plot(line1[0], line2[0])
# angle = round(angle, 2)
# gray = cv2.cvtColor(img_black_copy, cv2.COLOR_BGR2GRAY)
# _, imgBinary = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
# contours, _ = cv2.findContours(imgBinary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# if contours:
# contours_sorted = flit_sort_area_contours(contours, 1)
# min_rect = cv2.minAreaRect(contours_sorted[0])
# scale_size = p_to_p_distance(handpoint_list[8],handpoint_list[5])/ initial_length
# mat = cv2.getRotationMatrix2D(min_rect[0], -angle * 2, scale_size)
            # # the three parameters are the rotation center, the rotation angle, and the scale factor
# img_black_copy = cv2.warpAffine(img_black_copy, mat, (h, w))
# img_black_copy = cv2.resize(img_black_copy,(w,h))
# cv2.imshow('window', img_black_copy)
frame,hand_list = index_thumb_pt(frame,results)
if hand_list['click']:
draw_character(gesture_lines, img_black)
gesture_lines.append(hand_list["pt"])
img_gray = cv2.cvtColor(img_black,cv2.COLOR_BGR2GRAY)
_, imgInv = cv2.threshold(img_gray,50, 255, cv2.THRESH_BINARY_INV)
imgInv = cv2.cvtColor(imgInv,cv2.COLOR_GRAY2BGR)
img = cv2.bitwise_and(frame,imgInv)
img = cv2.bitwise_or(img,img_black)
cv2.imshow('my_window',img)
if cv2.waitKey(1) in [ord('q'),27]:
break
cap.release()
cv2.destroyAllWindows()
mainFunction.py

from hand_writing import *
from recognize import baiduOCR
import sys
import LogIn
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import recognize_character
import preFunction
import mediapipe as mp
import numpy as np
import autopy
class MyWindow(QMainWindow,LogIn.Ui_LogIn):
def __init__(self):
super(MyWindow, self).__init__()
self.setupUi(self)
        self.initail_condition()  # initialize state
        self.my_thread = MyThread()  # instantiate the worker thread
        self.my_thread.my_signal.connect(self.insert_word)  # connect the thread's signal to the slot
        self.timer_camera.timeout.connect(self.show_camera)  # connect the timer's timeout signal to the slot
self.pushButton_ok.clicked.connect(self.password_handin)
self.pushButton_delete.clicked.connect(self.password_clear)
def initail_condition(self):
        self.wScr, self.hScr = autopy.screen.size()  # screen width and height
        self.wCam, self.hCam = 640, 480  # camera frame width and height
        self.smoothening = 7  # smoothing factor for mouse movement
        self.plocX, self.plocY = 0, 0
        self.clocX, self.clocY = 0, 0
        self.mp_hands = mp.solutions.hands  # instantiate the hand-detection solution
        self.hands = self.mp_hands.Hands(static_image_mode=False,
                                         max_num_hands=1,  # detect at most one hand
                                         min_detection_confidence=0.7,  # confidence threshold for detecting a hand
                                         min_tracking_confidence=0.5)  # confidence threshold for tracking the same hand
        self.mpDraw = mp.solutions.drawing_utils
        self.cap = cv2.VideoCapture()
        self.timer_camera = QTimer()
        self.timer_camera.start(30)  # timer: refresh the displayed frame every 30 ms
        self.cap.open(0)
        self.gesture_lines = []  # stores each point of the handwritten strokes
        self.img_black = np.zeros((self.hCam, self.wCam, 3), dtype='uint8')  # black canvas the handwriting is drawn on
def insert_word(self,word):
if word:
self.lineEdit.insert(word)
else:
QMessageBox.warning(self, '警告', '写的太丑了请重新输入')
self.my_thread.terminate()
def show_camera(self):
        flag, self.image = self.cap.read()  # read a frame from the video stream
        if not flag:
            return
        h, w, c = self.image.shape[0], self.image.shape[1], self.image.shape[2]
# print(h,w,c)
frame = cv2.flip(self.image, 1)
img_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.hands.process(img_RGB)  # run hand detection on the frame
if results.multi_hand_landmarks:
self.mpDraw.draw_landmarks(frame, results.multi_hand_landmarks[0], self.mp_hands.HAND_CONNECTIONS)
            # draw the landmarks of the first detected hand (only one is detected)
            handpoint_list = hand_point(results, h, w)  # pixel coordinates of every hand landmark
            hand_pose = judge_handpose(handpoint_list)  # classify the hand pose from the landmark positions
if hand_pose == 'Thumb_up' and len(self.gesture_lines) > 10:
cv2.imwrite('picture/character.jpg', self.img_black)
                self.my_thread.start()  # start the OCR thread on the saved image
                self.img_black = np.zeros((h, w, c), dtype='uint8')  # reset the black canvas
                self.gesture_lines = []  # reset the stroke points
elif hand_pose == 'Index_up':
index_x, index_y = handpoint_list[8]
                screen_x = np.interp(index_x, (0, self.wCam), (0, self.wScr))  # map camera coordinates to screen coordinates
                screen_y = np.interp(index_y, (0, self.hCam), (0, self.hScr))
                self.clocX = self.plocX + (screen_x - self.plocX) / self.smoothening  # smooth the mouse movement
                self.clocY = self.plocY + (screen_y - self.plocY) / self.smoothening
                autopy.mouse.move(self.clocX, self.clocY)  # move the mouse
cv2.circle(frame, (index_x, index_y), 10, (255, 0, 255), cv2.FILLED)
self.plocX, self.plocY = self.clocX, self.clocY
elif hand_pose == 'Index_middle_up':
if p_to_p_distance(handpoint_list[8], handpoint_list[12]) < 50:
index_x, index_y = handpoint_list[8]
middle_x, middle_y = handpoint_list[12]
click_x, click_y = int((index_x + middle_x) / 2), int((index_y + middle_y) / 2)
cv2.circle(frame, (click_x, click_y), 10, (0, 255, 0), cv2.FILLED)
                    autopy.mouse.click()  # click the mouse
elif hand_pose == 'Pinky_up':
pinky_x, pinky_y = handpoint_list[20]
cv2.circle(frame, (pinky_x, pinky_y), 15, (0, 255, 0), cv2.FILLED)
                cv2.circle(self.img_black, (pinky_x, pinky_y), 15, (0, 0, 0), cv2.FILLED)  # painting black onto the canvas acts as an eraser
# elif hand_pose == 'Fingers_together':
# gray = cv2.cvtColor(self.img_black, cv2.COLOR_BGR2GRAY)
# _, imgBinary = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
# contours, _ = cv2.findContours(imgBinary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# if contours:
# contours_sorted = flit_sort_area_contours(contours, 1)
# self.img_black = cv2.drawContours(self.img_black, contours_sorted, -1, (0, 0, 255), cv2.FILLED)
frame, hand_list = index_thumb_pt(frame, results)
if hand_list['click']:
                draw_character(self.gesture_lines, self.img_black)  # connect successive stroke points on the canvas, producing the handwriting
self.gesture_lines.append(hand_list["pt"])
        img_gray = cv2.cvtColor(self.img_black, cv2.COLOR_BGR2GRAY)
        _, imgInv = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY_INV)
        imgInv = cv2.cvtColor(imgInv, cv2.COLOR_GRAY2BGR)
        img = cv2.bitwise_and(frame, imgInv)
        img = cv2.bitwise_or(img, self.img_black)  # bitwise_and / bitwise_or paste the writing from the black canvas onto each camera frame
        new_width, new_height = preFunction.resize_picture(img, width=self.label_cap.width(),
                                                           height=self.label_cap.height())
        # fit the frame to the size of the camera-display widget
        qt_img_detect = preFunction.cvimg_to_qtimg(img)  # convert the OpenCV image to a QImage
        new_img = qt_img_detect.scaled(new_width, new_height, Qt.KeepAspectRatio)
        self.label_cap.setPixmap(QPixmap.fromImage(new_img))
        self.label_cap.setAlignment(Qt.AlignCenter)  # display the frame in the label widget
def password_handin(self):
if self.lineEdit.text() == '小石成大事':
QMessageBox.information(self,'提示','输入密码正确')
else:
QMessageBox.information(self, '提示', '输入密码错误,请重新输入')
def password_clear(self):
self.lineEdit.backspace()
def center(self):
screen = QDesktopWidget().screenGeometry()
size = self.geometry()
newLeft = int((screen.width()-size.width())/2)
newTop = int((screen.height()-size.height())/2-40)
        self.move(newLeft, newTop)  # center the window on the screen
def add_shadow(self):
        # add a drop shadow to the window
        self.effect_shadow = QGraphicsDropShadowEffect(self)
        self.effect_shadow.setOffset(0, 0)  # shadow offset
        self.effect_shadow.setBlurRadius(10)  # shadow radius
        self.effect_shadow.setColor(Qt.gray)  # shadow color
        self.widget.setGraphicsEffect(self.effect_shadow)  # apply the effect to the widget
class MyThread(QThread):  # worker-thread class
    my_signal = pyqtSignal(str)  # custom signal; the str parameter means it carries a string
    def __init__(self):
        super(MyThread, self).__init__()
    def run(self):  # thread entry point
        word = baiduOCR('picture/character.jpg')  # recognize the handwritten text
        self.my_signal.emit(word)  # emit the custom signal
if __name__ == "__main__":
app = QApplication(sys.argv)
MainWindow = MyWindow()
MainWindow.center()
MainWindow.add_shadow()
MainWindow.show()
sys.exit(app.exec_())
preFunction.py

import cv2
import numpy as np
from PyQt5.QtGui import *
import math
def cvimg_to_qtimg(cvimg):
cvimg = cvimg.astype(np.uint8)
height, width, channels = cvimg.shape[:3]
cvimg = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
cvimg = QImage(cvimg.data, width, height, width * channels, QImage.Format_RGB888)
return cvimg
def resize_picture(img,width,height):
w = np.array(img).shape[1]
h = np.array(img).shape[0]
if w / width >= h / height:
ratio = w / width
else:
ratio = h / height
new_width = int(w / ratio)
new_height = int(h / ratio)
return new_width,new_height
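# A minimal, hypothetical usage sketch for the two helpers above (the image
# path is illustrative, not part of the original file): scale a frame to fit
# a 540x360 widget while keeping its aspect ratio, then convert it for
# display in a Qt label.
#
#     frame = cv2.imread('picture/character.jpg')
#     new_w, new_h = resize_picture(frame, width=540, height=360)
#     qimg = cvimg_to_qtimg(frame).scaled(new_w, new_h)
#     # show it with QLabel.setPixmap(QPixmap.fromImage(qimg))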
recognize.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 09:37:38 2018
Text recognition on images via the Baidu OCR API
@author: XnCSD
"""
from os import path
import os
from aip import AipOcr
from PIL import Image
def convertimg(picfile, outdir):
    '''Resize an image, compressing oversized ones.
    picfile: path to the input image
    outdir: output directory for the resized image
'''
img = Image.open(picfile)
width, height = img.size
    while (width * height > 4000000):  # at this threshold the compressed image ends up at roughly 200+ KB
width = width // 2
height = height // 2
new_img = img.resize((width, height), Image.BILINEAR)
new_img.save(path.join(outdir, os.path.basename(picfile)))
def baiduOCR(picfile):
    filename = path.basename(picfile)
    APP_ID = ''
    API_KEY = ''
    SECRET_KEY = ''
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    i = open(picfile, 'rb')
    img = i.read()
    print("正在识别图片:\t" + filename)
    message = client.webImage(img)  # general web-image OCR, 50,000 free calls per day
    # message = client.basicAccurate(img)  # high-accuracy OCR, 800 free calls per day
    print("识别成功!")
    i.close()
    if message.get('words_result'):  # guard against error responses that lack this key
word = message.get('words_result')[0].get('words')
return word
else:
return None
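# A minimal, hypothetical usage sketch (fill in APP_ID / API_KEY / SECRET_KEY
# above first; the image path is illustrative, not part of the original file):
# compress the image in place, then recognize it.
#
#     convertimg('picture/character.jpg', 'picture')
#     word = baiduOCR('picture/character.jpg')
#     print(word if word else 'nothing recognized')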
recognize_character.py

This diff has been collapsed.
recognize_character.qrc

<RCC>
<qresource prefix="decorate">
<file>zzf.jpg</file>
</qresource>
</RCC>
zzf.jpg

276.2 KB
