OpenCV window opens before the button is pressed

Date: 2019-03-16 06:42:19

Tags: python python-3.x video pyqt4

I want to open an OpenCV window when a button is clicked. I downloaded an emotion recognition module from GitHub and set up a function to be called when the button is clicked, but when I run the file it starts the video capture automatically and the GUI screen is never shown. How do I fix this error?

This is my main file, from which I call the other file:

```python
import sys
import os
import integrate
import subprocess

from PyQt4 import QtCore, QtGui, uic
from PyQt4.phonon import Phonon



class mainwindow(QtGui.QMainWindow):
    def __init__(self):
        super(mainwindow,self).__init__()
        self.setGeometry(50,50,500,500)
        self.setWindowTitle("PAVAI")
        #self.setStyleSheet("background-image: url(/home/balaji/galaxy.png)")


        Calculate = QtGui.QPushButton("video",self)
        Calculate.setStyleSheet('QPushButton {background-color: #d8cfcd; color: white;}')
        Calculate.move(100,200)
        Calculate.clicked.connect(self.play)


        self.show()

    def play(self):
        # launch integrate.py in a separate process when the button is clicked
        subprocess.Popen("integrate.py ", shell=True)

def main():
    app = QtGui.QApplication(sys.argv)
    pavai = mainwindow()
    sys.exit(app.exec_())

main()

```
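As an aside, `subprocess.Popen("integrate.py ", shell=True)` asks the shell to run the script as a plain command, which only works if the file is marked executable and has a shebang line. A minimal sketch of launching the script through the current interpreter instead (assuming integrate.py sits in the working directory):

```python
import sys
import subprocess

# Launch integrate.py with the same Python interpreter that runs the GUI,
# instead of relying on the shell to resolve "integrate.py" as a command.
subprocess.Popen([sys.executable, "integrate.py"])
```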

integrate.py:

```python
import os
import sys

def video():

    from statistics import mode

    import cv2
    from keras.models import load_model
    import numpy as np

    from utils.datasets import get_labels
    from utils.inference import detect_faces
    from utils.inference import draw_text
    from utils.inference import draw_bounding_box
    from utils.inference import apply_offsets
    from utils.inference import load_detection_model
    from utils.preprocessor import preprocess_input

    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode,
                      color, 0, -45, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
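
    # Release the camera and close the OpenCV window once the loop exits,
    # so the capture device is freed after 'q' is pressed.
    video_capture.release()
    cv2.destroyAllWindows()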


video()

```
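For context, the bare `video()` call at the bottom of integrate.py runs as soon as the module is imported, and the main file does `import integrate` at the top. A minimal sketch of the usual import guard (shown here as one possible direction, not a confirmed fix for this setup):

```python
# Run the capture only when integrate.py is executed directly,
# not when it is merely imported by another module.
if __name__ == '__main__':
    video()
```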

How can I fix this? The window opens as soon as I run the file, instead of waiting for the button click.

0 answers:

No answers yet.