From 3ec524b52ed80016122390243c6bc12ee0e0e5d2 Mon Sep 17 00:00:00 2001 From: Jcen <172036972@qq.com> Date: Fri, 4 Oct 2024 13:00:37 +0800 Subject: [PATCH] Initial commit --- lesson1/QML.py | 106 ++ lesson1/cv2流播放.py | 3150 +++++++++++++++++++++++++++++++++++++++ lesson1/list拖拽信号.py | 71 + lesson1/maya_qt.py | 69 + lesson1/pyqt下重定向.py | 47 + lesson1/qt6截屏.py | 1121 ++++++++++++++ lesson1/链接.py | 2 + 7 files changed, 4566 insertions(+) create mode 100644 lesson1/QML.py create mode 100644 lesson1/cv2流播放.py create mode 100644 lesson1/list拖拽信号.py create mode 100644 lesson1/maya_qt.py create mode 100644 lesson1/pyqt下重定向.py create mode 100644 lesson1/qt6截屏.py create mode 100644 lesson1/链接.py diff --git a/lesson1/QML.py b/lesson1/QML.py new file mode 100644 index 0000000..9d8453f --- /dev/null +++ b/lesson1/QML.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +import sys +from PyQt5.QtWidgets import * +from PyQt5.QtQuickWidgets import QQuickWidget +from PyQt5.QtCore import QUrl, pyqtSignal, pyqtSlot +from PyQt5.QtCore import * + +class Widget(QWidget): + sigSendToQml = pyqtSignal(str) + + def __init__(self, parent=None): + super().__init__(parent) + self.setWindowTitle('Qt UI Demo') + self.setGeometry(100, 100, 640, 480) + # self.setStyleSheet("QWidget {background-color: transparent; border: 2px solid black; border-radius: 10px;}") + # self.setWindowFlags(Qt.FramelessWindowHint) + layout = QVBoxLayout() + self.L = QListWidget() + layout.addWidget(self.L) + self.quickWidget = QQuickWidget() + self.quickWidget.setFocus() + self.quickWidget.setResizeMode(QQuickWidget.SizeRootObjectToView) + self.quickWidget.setSource(QUrl('qml.qml')) + self.Ls = QListWidgetItem(self.L) + self.Ls.setSizeHint(self.quickWidget.sizeHint()) + self.L.addItem(self.Ls) + self.L.setItemWidget(self.Ls, self.quickWidget) + + self.quickWidget = QQuickWidget() + self.quickWidget.setFocus() + self.quickWidget.setResizeMode(QQuickWidget.SizeRootObjectToView) + self.quickWidget.setSource(QUrl('qml.qml')) + 
self.Ls = QListWidgetItem(self.L) + self.Ls.setSizeHint(self.quickWidget.sizeHint()) + self.L.addItem(self.Ls) + self.L.setItemWidget(self.Ls, self.quickWidget) + # layout.addWidget(self.quickWidget) + + self.lineEdit = QLineEdit() + layout.addWidget(self.lineEdit) + + send_button = QPushButton('Send') + send_button.clicked.connect(self.on_send_clicked) + layout.addWidget(send_button) + + self.setLayout(layout) + + self.sigSendToQml.connect(self.quickWidget.rootObject().sigQmlReceiveStr) + self.quickWidget.rootObject().sigQmlSendStr.connect(self.slotReceiveFormQml) + + @pyqtSlot() + def on_send_clicked(self): + self.sigSendToQml.emit(self.lineEdit.text()) + + @pyqtSlot(str) + def slotReceiveFormQml(self, string): + print('Received from QML:', string) + + +if __name__ == '__main__': + app = QApplication(sys.argv) + widget = Widget() + widget.show() + sys.exit(app.exec_()) + + + + +import QtQuick 2.9 +import QtQuick.Controls 2.2 + +Rectangle { + id: root + width: 400 + height: 300 + color: "transparent" // 设置背景颜色 + + border.color: "red" // 设置边框颜色 + border.width: 5 // 设置边框宽度 + radius: 10 // 设置圆角半径 + + signal sigQmlReceiveStr(string str) //定义接收信号(from QWidget) +signal sigQmlSendStr(string str) //定义发送信号(to QWidget) +Image { + id: image + source: "https://tse4-mm.cn.bing.net/th/id/OIP-C.duz6S7Fvygrqd6Yj_DcXAQHaF7?rs=1&pid=ImgDetMain" // 设置图片URL + anchors.centerIn: parent + width: 200 + height: 200 + fillMode: Image.PreserveAspectFit // 设置图片填充模式 +} + + +Text { + id: text + anchors.centerIn: parent + color: "red" + font.pixelSize: 100 + text: qsTr("hello world!") +} + +onSigQmlReceiveStr: { //信号对应槽函数 +text.text = str +root.sigQmlSendStr("received: " + str) //调用信号 +} +} \ No newline at end of file diff --git a/lesson1/cv2流播放.py b/lesson1/cv2流播放.py new file mode 100644 index 0000000..c8a697f --- /dev/null +++ b/lesson1/cv2流播放.py @@ -0,0 +1,3150 @@ +# -*- coding: utf-8 -*- +import time +import vlc +import cv2 +from PyQt5.QtGui import * +from PyQt5.QtCore import * +from 
PyQt5.QtWidgets import * +import sys +from multiprocessing import Process, Manager, freeze_support +from PyQt5.QtCore import QThread, pyqtSignal +from PyQt5.QtWidgets import QApplication + + +def process_frame(info_dict, frame_dict): + List = [] + import os + for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')): + # if s > 3: + # break + List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i)) + f = 0 + frams = 0 + for s, n in enumerate(List): + if frams != 0: + print('dsds') + for j in range(f, frams): + print(j ,'lllllll') + frame_dict[j] = None + cap = cv2.VideoCapture(n) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frams = frameNum + info_dict[s] = frameNum + for i in range(frameNum): + ret, frame = cap.read() + if ret: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + frame_dict[f] = frame + f += 1 + + +class Window(QLabel): + def __init__(self): + super(Window, self).__init__() + + self.cacheInfo = {} + self.setAlignment(Qt.AlignCenter) + # self.setFixedSize(1280, 720) + self.cap = cv2.VideoCapture(r"C:\Users\ARTIST\Pictures\f\render_d20_050_V001.mov") + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + ret, frame = self.cap.read() + if ret: + self.capheight, self.capwidth, bytesPerComponent = frame.shape + self.capbutedperline = 3 * self.capwidth + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + qimg = QImage( + frame.data, + self.capwidth, + self.capheight, + self.capbutedperline, + QImage.Format.Format_RGB888, + ) + pixmap = QPixmap.fromImage(qimg).scaled(1280, 720) + self.setPixmap(pixmap) + self.get() + self.cacheThread = CacheThread() + # self.cacheThread.cachedone.connect(self.cache_done) + # self.cacheThread.start() + # self.cacheFrames = {} + self.timer = QTimer() + self.timer.setInterval(1000 // 24) # 设置定时器间隔,以确保每秒触发24次 + self.timer.timeout.connect(self.generate_number) # 连接定时器的timeout信号到生成数字的槽函数 + self.counter = 0 + # time.sleep(3) + manager = Manager() + self.cacheFrames = manager.dict() + p = Process(target=process_frame, 
args=(self.cacheInfo, self.cacheFrames,)) + p.start() + self.timer.start() + self.sss = Get(self.cacheFrames, self.counter) + self.sss.start() + + # @pyqtSlot() + # def generate_number(self): + # # print(self.cacheFrames) + # if self.counter in self.cacheFrames: + # qimg = QImage( + # cv2.imdecode(np.frombuffer(self.cacheFrames.get(self.counter), np.uint8), cv2.IMREAD_COLOR), + # self.capwidth, + # self.capheight, + # self.capbutedperline, + # QImage.Format.Format_RGB888, + # ) + # pixmap = QPixmap.fromImage(qimg).scaled(1280, 720) + # self.setPixmap(pixmap) + # self.counter += 1 + + @pyqtSlot() + def generate_number(self): + if self.counter in self.cacheFrames: + frame = self.cacheFrames[self.counter] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + self.setPixmap(pixmap) + self.counter += 1 + + def cache_done(self, frame, data): + print('cache done', data.data) + # self.cacheFrames = self.cacheThread.cacheFrames + # self.cacheFrames[frame] = data + # self.capheight = self.cacheThread.capheight + # self.capwidth = self.cacheThread.capwidth + # self.capbutedperline = self.cacheThread.capbutedperline + # self.timer.start() + + def get(self): + List = [] + import os + for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')): + # if s > 3: + # break + List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i)) + f = 0 + for s, n in enumerate(List): + cap = cv2.VideoCapture(n) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.cacheInfo[s] = frameNum + print(self.cacheInfo) + + def mousePressEvent(self, ev): + x = ev.pos().x() + i = int(float(x)/self.width() * sum([self.cacheInfo.get(i) for i in self.cacheInfo.keys()])) + if i in self.cacheFrames: + self.counter = i + + +class Get(QThread): + num = pyqtSignal(int) + + def __init__(self, info=None, 
counter=None): + super(Get, self).__init__() + + self.info = info + self.counter = counter + + def run(self) -> None: + i = 0 + while True: + # try: + for s in range(self.counter-10): + print('y', s) + if s < self.counter-20: + continue + # try: + self.info[s] = None + # except Exception as e: + # print('e', e) + # except Exception as a: + # print('a', a) + + if i in self.info: + print(len(self.info)) + self.num.emit(len(self.info)) + + +class CacheThread(QThread): + cachedone = pyqtSignal(int, object) + + def __init__(self): + super(CacheThread, self).__init__() + + self.cacheFrames = {} + + def run(self) -> None: + List = [] + + import os + for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')): + if s > 3: + break + List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i)) + + f = 0 + for n in List: + self.cap = cv2.VideoCapture(n) + self.frameNum = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + for i in range(self.frameNum): + ret, frame = self.cap.read() + if ret: + frame = cv2.resize(frame, (1280, 720)) + self.capheight, self.capwidth, bytesPerComponent = frame.shape + self.capbutedperline = 3 * self.capwidth + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # self.cacheFrames[f+1] = frame + self.cachedone.emit(f+1, frame) + f += 1 + print(f) + # self.cachedone.emit() + self.cap.release() + + +if __name__ == '__main__': + freeze_support() + app = QApplication(sys.argv) + widget = Window() + widget.show() + app.exec_() + + + +# ++++++++++++++++++++++++++++++++++++++++++ + +import random +import threading +import time +import cv2 +from PyQt5.QtGui import * +from PyQt5.QtCore import * +from PyQt5.QtWidgets import * +import sys +from multiprocessing import Process, Manager, Value, freeze_support +from PyQt5.QtCore import QThread, pyqtSignal +from PyQt5.QtWidgets import QApplication +import pyaudio +from moviepy.editor import VideoFileClip + + +def get(frame, data): + for key, value in data.items(): + if frame in value[1]: + return key, value[0] + +def 
play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0): + while True: + if path: + video = VideoFileClip(path) + cap = cv2.VideoCapture(path) + fps = cap.get(cv2.CAP_PROP_FPS) + else: + video = VideoFileClip("[J2] 銀魂 002.rmvb") + cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb") + fps = cap.get(cv2.CAP_PROP_FPS) + audio = video.audio + if not audio: + au_if.value = 0 + continue + p = pyaudio.PyAudio() + rat = audio.fps + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=rat, + output=True) + # 计算开始位置的帧数 + + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + fr = int(rat/fps) + if (i % fr) == 0: + if i == 0: + print('event', i) + event.value = 1 + time.sleep(1.5) + else: + time_.value = i + frame.value += 1 + if (i % rat) == 0: + print('miao', s) + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +class T(QThread): + frame = pyqtSignal() + def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None): + super().__init__() + + self.cache = cache + self.n_playframe = n_playframe + self.frames1 = frames1 + self.playframe = playframe + self.time = time + + def run(self): + while True: + # print('bububuubub') + if self.playframe.value in self.cache.keys(): + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + else: + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + # self.time.value = self.playframe.value + # for i in range(2): + # pass + # # 
print(1111, self.n_playframe.value, self.frames1.value) + # # self.frame.emit() + # self.n_playframe.value = self.frames1.value + + +class PlayAudio(QThread): + frame = pyqtSignal() + + def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0): + super().__init__() + + self.cache = cache + self.playlist = playlist + self.playframe = playframe + self.n_playframe = n_playframe + self.time = time + self.select = select + self.test = test + self.start_time = start_time + self.timer = QTimer() + cap = cv2.VideoCapture(self.playlist[self.select.value]) + self.fps = cap.get(cv2.CAP_PROP_FPS) + print(self.fps) + self.timer.setInterval(1000 // self.fps) + self.timer.timeout.connect(self.Es) + # self.timer.start() + self.audio = None + self.stream = None + self.buffer = [] + self.sss = False + self.au_event = Manager().Value("i", 0) + self.frames = Manager().Value("i", 0) + self.frames1 = Manager().Value("i", 0) + self.frames2 = 0 + self.au_time = Manager().Value("i", 0) + self.if_au = Manager().Value("i", 1) + self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au)) + self.p.start() + self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, self.cache) + self.t.frame.connect(self.Es) + while True: + if self.au_event.value: + # self.timer.start() + self.frame.emit() + self.playframe.value += 1 + self.t.start() + self.au_event.value = 0 + break + elif self.if_au.value == 0: + self.timer.start() + break + + def set(self, fps): + # self.timer.stop() + self.timer.setInterval(1000 // int(fps)) + # self.timer.start() + + # def run(self): + # index = None + # p = None + # if self.sss: + # return + # # while True: + # # # print(self.select.value) + # # # print(self.playlist[self.select.value]) + # # if index != self.select.value: + # # if self.stream: + # # self.stream.close() + # # self.buffer = [] + # # self.audio = None + # # self.stream = None + # # # if 
p: + # # # print('clor') + # # # p.terminate() + # # + # # index = self.select.value + # # video = VideoFileClip(self.playlist[self.select.value]) + # # self.audio = video.audio + # # if self.audio: + # # rat = self.audio.fps + # # + # # p = pyaudio.PyAudio() + # # # 创建输出流 + # # self.stream = p.open(format=pyaudio.paFloat32, + # # channels= self.audio.nchannels, + # # rate=rat, + # # output=True) + # # # print(playlist[select.value]) + # # # 计算开始位置的帧数 + # # start_frame = self.start_time + # # + # # # print(rat, 'rat') + # # # 设置缓冲区大小 + # # buffer_size = 1024 + # # else: + # # # print('play') + # # + # # if self.audio: + # # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)): + # # # print(i) + # # fr = int(rat/self.fps) + # # if (i % fr) == 0: + # # pass + # # # print('dddddddddddddd') + # # # self.frame.emit() + # # # print(self.cou) + # # if i >= start_frame: + # # self.buffer.append(chunk.astype('float32').tostring()) + # # if len(self.buffer) >= buffer_size: + # # self.stream.write(b''.join(self.buffer)) + # # self.buffer = [] + # # # print(stream.get_time()) + # # # 写入剩余数据 + # # if self.buffer: + # # self.stream.write(b''.join(self.buffer)) + # # p.terminate() + + def run(self): + while True: + if self.frames1.value != self.frames.value: + self.frames1.value = self.frames.value + self.playframe.value += 1 + # print('相差', self.frames.value, self.playframe.value) + + # print(self.frames1) + self.frame.emit() + # print('run') + # self.playframe. 
+ + def Es(self): + pass + # print('es') + # self.terminate() + # while True: + # if abs(self.frames1 - self.n_playframe.value): + # for i in range(abs(self.frames1 - self.n_playframe.value)): + # print(1111) + self.frame.emit() + # if not self.if_au.value: + # self.frame.emit() + # return + # self.frames2 += 1 + # if self.frames1 != self.frames2: + # print(self.frames1, self.frames2, self.frames.value, self.au_time) + # self.frame.emit() + # if self.audio: + # return + # if self.frames1 != self.frames.value: + # return + # self.frame.emit() + # while True: + + +def play(playFrame, buff): + p = pyaudio.PyAudio() + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + while True: + # # print(playFrame.value, 'bdddd') + # if playFrame.value%25 == 0: + stream.write(b''.join(buff[int(playFrame.value)])) + # # print(self.playFrame.value, dir(self.buff), self.buff.keys()) + + +def play_audios(playlist, select, test, start_time=0): + + index = None + p = None + while True: + + # print(playlist[select.value]) + if index != select.value: + if p: + p.terminate() + index = select.value + video = VideoFileClip(playlist[select.value]) + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # print(playlist[select.value]) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + else: + # print('play') + + if audio: + o = 0 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if (i%1764) == 0: + test.value = o + o += 1 + # print(o) + if i >= start_frame: + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def play_audio(path=None, start_time=0): + if path: + video = VideoFileClip(path) + else: + 
video = VideoFileClip("[J2] 銀魂 002.rmvb") + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + if i % (44100 / 25) == 0: + print(s, 'miao') + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def process_frame(playlist, playFrame, playindex, select, select_status, time, cache, frame_dict, playend, rod, hua): + # playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # print(playlist) + f = 1 + op = time.value + mov = select.value + while True: + if mov == (len(playlist)-0): + if not playend.value: + select_status.value = 0 + mov = select.value + op = time.value + f = time.value + cap.release() + frame_dict.clear() + continue + if f in frame_dict.keys(): + f += 1 + continue + playindex.value = mov + # print(select.value, mov) + # if select.value != mov and select_status.value: + # cap.release() + # mov = select.value + # frame_dict.clear() + # # select_status.value = 0 + # if time.value != op: + # cap.release() + # op = time.value + # f = time.value + # playFrame.value = time.value + # frame_dict.clear() + # print(time.value, op, playFrame.value, len(frame_dict), mov) + # print(mov, select.value, playlist[mov], frame_dict.keys()) + print('op' ,op, playindex.value) + # for s, n in enumerate(List[:]): + # if frams != 0: + # # print('dsds') + # for j in range(f, frams): + # # print(j ,'lllllll') + # frame_dict[j] = None + cap = cv2.VideoCapture(playlist[mov]) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + cap.set(cv2.CAP_PROP_POS_FRAMES, op) + + # print('cap', frameNum) + 
while cap.isOpened(): + if rod.value: + continue + # # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd') + if select_status.value and hua.value == 0: + # print('sssss', select.value, time.value, select_status.value) + select_status.value = 0 + mov = select.value + op = time.value + f = time.value + cap.release() + frame_dict.clear() + break + # ret, frame = cap.read() + # if ret: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # if frameNum < 220: + # ret, frame = cap.read() + # if ret: + # # if abs(f - playFrame.value) == 100: + # # if playFrame.value > 50: + # # if abs(f - playFrame.value) == 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # # else: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # cache.value = len(frame_dict) + playFrame.value + # # elif f < 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # else: + # mov += 1 + # op = 0 + # break + + if len(frame_dict) < 500: + ret, frame = cap.read() + if ret: + + # if abs(f - playFrame.value) == 100: + # if playFrame.value > 50: + # if abs(f - playFrame.value) == 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + frame_dict[f] = frame + f += 1 + cache.value = len(frame_dict) + playFrame.value + # print('duqu') + # print('ret', ret, f, len(frame_dict)) + # elif f < 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + else: + print('else', op, playindex.value) + if mov == (len(playlist)-1): + playend.value = 1 + mov += 1 + op = 0 + break + else: + continue + + + + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # frams = frameNum + # info_dict[s] = frameNum + # for i in range(frameNum): + # while True: + # if abs(f - playFrame.value) == 100: + # ret, frame = cap.read() 
+ # if ret: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + # continue + + +class Window(QLabel): + def __init__(self): + super(Window, self).__init__() + + + self.select_ = None + # time.sleep(3) + manager = Manager() + self.cacheFrames = manager.dict() + manager = Manager() + self.cacheInfo = manager.dict() + manager = Manager() + self.playFrame = manager.Value('i', 1) + self.n_playFrame = manager.Value('i', 1) + self.playindex = manager.Value('i', 0) + self.playindexing = manager.Value('i', 0) + self.select = manager.Value('i', 0) + self.select_status = manager.Value('i', 0) + self.tiems = manager.Value('i', 0) + self.cache = manager.Value('i', 0) + self.playList = manager.list() + self.playend = manager.Value('i', 0) + self.rod = manager.Value('i', 0) + self.test = manager.Value('i', 0) + self.hua = manager.Value('i', 0) + self.playInit = manager.dict() + import os + for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i)) + print(self.playList) + # self.playList.pop(0) + # self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb') + # manager = Manager() + # self.buff = manager.dict() + # self.generate_number1() + + # self.cacheInfo = {} + self.setAlignment(Qt.AlignCenter) + self.resize(1280, 720) + self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb") + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + fps = self.cap.get(cv2.CAP_PROP_FPS) + self.get() + # self.timer = QTimer() + # self.timer1 = QTimer() + # self.timer.setInterval(1000 // fps) # 设置定时器间隔,以确保每秒触发24次 + # self.timer.timeout.connect(self.generate_number) # 连接定时器的timeout信号到生成数字的槽函数 + + time.sleep(5) + for i in range(1): + p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua)) + 
p.start() + # self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test)) + + # time.sleep(2) + # self.timer.start() + # self.ps.start() + # self.timer1.start() + # self.sss = Get(self.cache) + # self.sss.start() + + self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames) + self.yy.frame.connect(self.generate_number) + self.yy.start() + + @pyqtSlot() + def generate_number(self): + if self.playend.value and self.playFrame.value == self.cacheFrames.keys()[-1]+1: + # print('end', self.playFrame.value, self.cacheFrames.keys()) + return + # if self.test.value: + # # print(self.playFrame.value, self.test.value) + # self.playFrame.value += self.test.value - 1 + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + # self.setPixmap(pixmap) + # # del frame + # # del pixmap + # # del qImg + # if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50: + # # # print(self.playFrame.value-self.tiems.value, self.playFrame.value-300) + # self.cacheFrames.pop(self.playFrame.value-51) + # return + # print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() ) + if self.playFrame.value not in self.cacheFrames: + if len(self.cacheFrames.keys()): + if self.playFrame.value > self.cacheFrames.keys()[-1]: + self.playFrame.value = self.cacheFrames.keys()[1] + elif self.playFrame.value < self.cacheFrames.keys()[0]: + self.playFrame.value = self.cacheFrames.keys()[1] + print(self.playFrame.value, 'sssssssssssssss', 
sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value) + return + if self.playFrame.value in self.cacheFrames: + while True: + try: + frame = self.cacheFrames[self.playFrame.value] + break + except: + continue + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + self.setPixmap(pixmap) + # del frame + # del pixmap + # del qImg + if self.playFrame.value-self.tiems.value > 250 and len(self.cacheFrames) > 250 and self.playFrame.value-251 in self.cacheFrames.keys(): + # print(self.playFrame.value, self.cacheFrames.keys()) + self.cacheFrames.pop(self.cacheFrames.keys()[0]) + if self.playindexing.value == 0: + if self.playFrame.value > self.cacheInfo[self.playindexing.value]: + self.playindexing.value += 1 + elif self.playindexing.value == len(self.cacheInfo.keys())-1: + pass + else: + if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + self.playindexing.value += 1 + # print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value)])) + # elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + # print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)])) + # self.playindexing.value += 1 + if self.playindexing.value == 0: + # print('当前帧0', + # self.playindexing.value, self.playFrame.value, + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # self.playFrame.value) + self.n_playFrame.value = self.playFrame.value + dframe = self.playFrame.value + zframe = self.playFrame.value + else: + if self.playFrame.value < sum([self.cacheInfo[i] for i in 
range(self.playindexing.value)]): + pass + # print('当前帧点击', + # '\n播放',self.playindexing.value, + # '\n当前',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n总帧', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value + 1 + dframe = self.playFrame.value + 1 + zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1 + else: + # print('当前帧1', + # '\n播放',self.playindexing.value, + # '\n帧',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n当前',self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + zframe = self.playFrame.value + # if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))): + # print('当前帧1', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # else: + # print('当前帧1', self.playindexing.value, self.playFrame.value, + # (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # print(self.playFrame.value) + print('\n实际播放', self.playFrame.value, '\n播放',self.playindexing.value, '\n当前帧', dframe, '\n总的当前帧', zframe, '\n镜头帧', 
self.n_playFrame.value) + self.playFrame.value += 1 + + + def generate_number1(self): + + s = Process(target=play_audio) + s.start() + + def get(self): + p = 1 + ps = 1 + for s, n in enumerate(self.playList): + cap = cv2.VideoCapture(n) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frameNum1 = frameNum + ps + self.cacheInfo[s] = frameNum + self.playInit[s] = [[1, frameNum], [ps, frameNum1]] + ps = frameNum1 + 1 + print(self.playInit.items()) + + def mousePressEvent(self, ev): + # self.timer.stop() + # if True: + # return + if ev.button() == Qt.LeftButton: + # print("Left mouse button pressed") + i = random.randint(0, len(self.playList)-1) + self.select_ = i + # s = random.randint(0, self.cacheInfo[i]) + x = ev.pos().x() + ip = int(float(x) / self.width() * self.cacheInfo[i]) + self.select_status.value = 1 + print('select', i, ip, self.cacheInfo[i]) + # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames, + # self.cacheFrames.keys()) + self.select.value = i + self.tiems.value = ip + self.playFrame.value = ip + self.playend.value = 0 + self.playindexing.value = i + # if not self.timer.isActive(): + # self.timer.start() + elif ev.button() == Qt.RightButton: + # print("Right mouse button pressed") + if self.yy.timer.isActive(): + self.yy.timer.stop() + print(self.playInit[3]) + frame = random.randint(0, self.playInit[self.playInit.keys()-1][1][1]) + print(frame) + # if get() + if self.playFrame.value in self.cacheFrames: + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + frame = self.cacheFrames[self.playFrame.value] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # 假设qImg是您的QImage对象 + file_path = "%s.jpg" % self.playFrame.value # 保存图片的文件路径 + + # 保存QImage对象为图片文件 + qImg.save(file_path) + # print('stop') + else: + # print('start') + 
self.yy.timer.start() + + def mouseMoveEvent(self, ev): + self.yy.timer.stop() + x = ev.pos().x() + ip = int(float(x) / self.width() * self.cacheInfo[self.select_]) + # self.select_status.value = 1 + # print('select', self.select_, ip, self.cacheInfo[self.select_]) + self.tiems.value = ip - 1 + self.playFrame.value = ip - 1 + self.hua.value = 1 + print(self.cacheFrames.keys()) + if self.playFrame.value in self.cacheFrames: + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + frame = self.cacheFrames[self.playFrame.value] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + self.setPixmap(pixmap) + super().mouseMoveEvent(ev) + + def mouseReleaseEvent(self, ev): + self.hua.value = 0 + cap = cv2.VideoCapture(self.playList[self.select.value]) + fps = cap.get(cv2.CAP_PROP_FPS) + self.yy.set(fps) + # time.sleep(1) + self.yy.timer.start() + super().mouseReleaseEvent(ev) + + def mouseDoubleClickEvent(self, a0): + if self.rod.value: + self.rod.value = 0 + else: + self.rod.value = 1 + super().mouseDoubleClickEvent(a0) + + def closeEvent(self, a0): + self.p.kill() + + +class Get(QThread): + num = pyqtSignal(int) + + def __init__(self, cache=None): + super(Get, self).__init__() + + self.cache = cache + + def run(self) -> None: + while True: + self.cache.value + + +if __name__ == '__main__': + freeze_support() + app = QApplication([]) + widget = Window() + widget.show() + # cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # print(frameNum) + app.exec_() + + + + +# ++++++++++++++++++++++++++++++++++++++++++++++++++++ + + +import random +import threading +import time +import cv2 +from PyQt5.QtGui import * +from PyQt5.QtCore import * +from PyQt5.QtWidgets import * +import 
sys +from multiprocessing import Process, Manager, Value, freeze_support +from PyQt5.QtCore import QThread, pyqtSignal +from PyQt5.QtWidgets import QApplication +import pyaudio +from moviepy.editor import VideoFileClip + + +def get(frame, data): + for key, value in data.items(): + if frame in list(range(value[1][0], value[1][1] + 1)): + if key == 0: + return key, value[0], frame - value[1][0] + 1 + else: + return key, value[0], frame - value[1][0] + +def play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0): + while True: + if path: + video = VideoFileClip(path) + audio = video.audio + if not audio: + au_if.value = 0 + continue + cap = cv2.VideoCapture(path) + fps = cap.get(cv2.CAP_PROP_FPS) + else: + video = VideoFileClip("[J2] 銀魂 002.rmvb") + audio = video.audio + if not audio: + au_if.value = 0 + continue + cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb") + fps = cap.get(cv2.CAP_PROP_FPS) + + + p = pyaudio.PyAudio() + rat = audio.fps + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=rat, + output=True) + # 计算开始位置的帧数 + + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + fr = int(rat/fps) + if (i % fr) == 0: + if i == 0: + # print('event', i) + event.value = 1 + time.sleep(1.5) + else: + time_.value = i + frame.value += 1 + if (i % rat) == 0: + # print('miao', s) + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +class T(QThread): + frame = pyqtSignal() + def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None): + super().__init__() + + self.cache = cache + self.n_playframe = n_playframe + self.frames1 = frames1 + self.playframe = playframe + 
self.time = time + + def run(self): + while True: + # print('bububuubub') + if self.playframe.value in self.cache.keys(): + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + else: + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + # self.time.value = self.playframe.value + # for i in range(2): + # pass + # # print(1111, self.n_playframe.value, self.frames1.value) + # # self.frame.emit() + # self.n_playframe.value = self.frames1.value + + +class PlayAudio(QThread): + frame = pyqtSignal() + + def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0): + super().__init__() + + self.cache = cache + self.playlist = playlist + self.playframe = playframe + self.n_playframe = n_playframe + self.time = time + self.select = select + self.test = test + self.start_time = start_time + self.timer = QTimer() + cap = cv2.VideoCapture(self.playlist[self.select.value]) + self.fps = cap.get(cv2.CAP_PROP_FPS) + print(self.fps) + self.timer.setInterval(1000 // self.fps) + self.timer.timeout.connect(self.Es) + # self.timer.start() + self.audio = None + self.stream = None + self.buffer = [] + self.sss = False + self.au_event = Manager().Value("i", 0) + self.frames = Manager().Value("i", 0) + self.frames1 = Manager().Value("i", 0) + self.frames2 = 0 + self.au_time = Manager().Value("i", 0) + self.if_au = Manager().Value("i", 1) + if not self.sss: + # self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au)) + # self.p.start() + + self.p = threading.Thread(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au)) + self.p.start() + + self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, 
self.cache) + self.t.frame.connect(self.Es) + while True: + if self.sss: + self.timer.start() + break + if self.au_event.value: + # self.timer.start() + self.frame.emit() + self.playframe.value += 1 + self.t.start() + self.au_event.value = 0 + break + elif self.if_au.value == 0: + self.timer.start() + break + + def set(self, fps): + # self.timer.stop() + self.timer.setInterval(1000 // int(fps)) + # self.timer.start() + + # def run(self): + # index = None + # p = None + # if self.sss: + # return + # # while True: + # # # print(self.select.value) + # # # print(self.playlist[self.select.value]) + # # if index != self.select.value: + # # if self.stream: + # # self.stream.close() + # # self.buffer = [] + # # self.audio = None + # # self.stream = None + # # # if p: + # # # print('clor') + # # # p.terminate() + # # + # # index = self.select.value + # # video = VideoFileClip(self.playlist[self.select.value]) + # # self.audio = video.audio + # # if self.audio: + # # rat = self.audio.fps + # # + # # p = pyaudio.PyAudio() + # # # 创建输出流 + # # self.stream = p.open(format=pyaudio.paFloat32, + # # channels= self.audio.nchannels, + # # rate=rat, + # # output=True) + # # # print(playlist[select.value]) + # # # 计算开始位置的帧数 + # # start_frame = self.start_time + # # + # # # print(rat, 'rat') + # # # 设置缓冲区大小 + # # buffer_size = 1024 + # # else: + # # # print('play') + # # + # # if self.audio: + # # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)): + # # # print(i) + # # fr = int(rat/self.fps) + # # if (i % fr) == 0: + # # pass + # # # print('dddddddddddddd') + # # # self.frame.emit() + # # # print(self.cou) + # # if i >= start_frame: + # # self.buffer.append(chunk.astype('float32').tostring()) + # # if len(self.buffer) >= buffer_size: + # # self.stream.write(b''.join(self.buffer)) + # # self.buffer = [] + # # # print(stream.get_time()) + # # # 写入剩余数据 + # # if self.buffer: + # # self.stream.write(b''.join(self.buffer)) + # # p.terminate() + + def run(self): + while 
True: + if self.frames1.value != self.frames.value: + self.frames1.value = self.frames.value + self.playframe.value += 1 + # print('相差', self.frames.value, self.playframe.value) + + # print(self.frames1) + self.frame.emit() + # print('run') + # self.playframe. + + def Es(self): + pass + # print('es') + # self.terminate() + # while True: + # if abs(self.frames1 - self.n_playframe.value): + # for i in range(abs(self.frames1 - self.n_playframe.value)): + # print(1111) + self.frame.emit() + # if not self.if_au.value: + # self.frame.emit() + # return + # self.frames2 += 1 + # if self.frames1 != self.frames2: + # print(self.frames1, self.frames2, self.frames.value, self.au_time) + # self.frame.emit() + # if self.audio: + # return + # if self.frames1 != self.frames.value: + # return + # self.frame.emit() + # while True: + + +def play(playFrame, buff): + p = pyaudio.PyAudio() + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + while True: + # # print(playFrame.value, 'bdddd') + # if playFrame.value%25 == 0: + stream.write(b''.join(buff[int(playFrame.value)])) + # # print(self.playFrame.value, dir(self.buff), self.buff.keys()) + + +def play_audios(playlist, select, test, start_time=0): + + index = None + p = None + while True: + + # print(playlist[select.value]) + if index != select.value: + if p: + p.terminate() + index = select.value + video = VideoFileClip(playlist[select.value]) + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # print(playlist[select.value]) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + else: + # print('play') + + if audio: + o = 0 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if (i%1764) == 0: + test.value = o + o += 1 + # print(o) + if i >= start_frame: + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= 
buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def play_audio(path=None, start_time=0): + if path: + video = VideoFileClip(path) + else: + video = VideoFileClip("[J2] 銀魂 002.rmvb") + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + if i % (44100 / 25) == 0: + print(s, 'miao') + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def process_frame(playlist, playFrame, playindex, select, select_status, times, cache, frame_dict, playend, rod, hua): + # playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # print(playlist) + f = 1 + op = times.value + mov = select.value + while True: + if mov == (len(playlist)-0): + if not playend.value: + select_status.value = 0 + mov = select.value + op = times.value + f = times.value + cap.release() + frame_dict.clear() + continue + playindex.value = mov + # print(select.value, mov) + # if select.value != mov and select_status.value: + # cap.release() + # mov = select.value + # frame_dict.clear() + # # select_status.value = 0 + # if time.value != op: + # cap.release() + # op = time.value + # f = time.value + # playFrame.value = time.value + # frame_dict.clear() + # print(time.value, op, playFrame.value, len(frame_dict), mov) + # print(mov, select.value, playlist[mov], frame_dict.keys()) + # print('op' ,op, playindex.value) + # for s, n in enumerate(List[:]): + # if frams != 0: + # # print('dsds') + # for j in range(f, 
frams): + # # print(j ,'lllllll') + # frame_dict[j] = None + cap = cv2.VideoCapture(playlist[mov]) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + cap.set(cv2.CAP_PROP_POS_FRAMES, op) + + # print('cap', frameNum) + while cap.isOpened(): + if rod.value: + continue + # # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd') + if select_status.value and hua.value == 0: + # time.sleep(3) + # print('sssss', select.value, time.value, select_status.value) + select_status.value = 0 + mov = select.value + op = times.value + f = times.value + cap.release() + frame_dict.clear() + break + # ret, frame = cap.read() + # if ret: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # if frameNum < 220: + # ret, frame = cap.read() + # if ret: + # # if abs(f - playFrame.value) == 100: + # # if playFrame.value > 50: + # # if abs(f - playFrame.value) == 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # # else: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # cache.value = len(frame_dict) + playFrame.value + # # elif f < 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # else: + # mov += 1 + # op = 0 + # break + + if len(frame_dict) < 120: + ret, frame = cap.read() + if ret: + + # if abs(f - playFrame.value) == 100: + # if playFrame.value > 50: + # if abs(f - playFrame.value) == 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + frame_dict[f] = frame + f += 1 + cache.value = len(frame_dict) + playFrame.value + # print('duqu') + # print('ret', ret, f, len(frame_dict)) + # elif f < 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + else: + # print('else', op, playindex.value) + if mov == (len(playlist)-1): + playend.value = 1 + mov += 1 + op = 0 + 
break + else: + continue + + + + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # frams = frameNum + # info_dict[s] = frameNum + # for i in range(frameNum): + # while True: + # if abs(f - playFrame.value) == 100: + # ret, frame = cap.read() + # if ret: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + # continue + + +class Window(QLabel): + def __init__(self): + super(Window, self).__init__() + + + self.select_ = None + # time.sleep(3) + manager = Manager() + self.cacheFrames = manager.dict() + manager = Manager() + self.cacheInfo = manager.dict() + manager = Manager() + self.playFrame = manager.Value('i', 1) + self.n_playFrame = manager.Value('i', 1) + self.playindex = manager.Value('i', 0) + self.playindexing = manager.Value('i', 0) + self.select = manager.Value('i', 0) + self.select_status = manager.Value('i', 0) + self.tiems = manager.Value('i', 0) + self.cache = manager.Value('i', 0) + self.playList = manager.list() + self.playend = manager.Value('i', 0) + self.rod = manager.Value('i', 0) + self.test = manager.Value('i', 0) + self.hua = manager.Value('i', 0) + self.playInit = manager.dict() + import os + for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i)) + print(self.playList) + # self.playList.pop(0) + # self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb') + # manager = Manager() + # self.buff = manager.dict() + # self.generate_number1() + + # self.cacheInfo = {} + self.setAlignment(Qt.AlignCenter) + self.resize(1280, 720) + self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb") + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + fps = self.cap.get(cv2.CAP_PROP_FPS) + self.get() + # self.timer = QTimer() + # self.timer1 = QTimer() + # self.timer.setInterval(1000 // fps) # 设置定时器间隔,以确保每秒触发24次 + # self.timer.timeout.connect(self.generate_number) # 
连接定时器的timeout信号到生成数字的槽函数 + + # time.sleep(5) + for i in range(1): + self.p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua)) + self.p.start() + # self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test)) + + # time.sleep(2) + # self.timer.start() + # self.ps.start() + # self.timer1.start() + # self.sss = Get(self.cache) + # self.sss.start() + + self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames) + self.yy.frame.connect(self.generate_number) + self.yy.start() + + @pyqtSlot() + def generate_number(self): + # print(self.playend.value, self.playFrame.value , self.playInit[self.playInit.keys()[-1]][-1][-1]+1) + # print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() ) + if self.playFrame.value >= self.playInit[self.playInit.keys()[-1]][-1][-1]+1: + # print('end', self.playFrame.value, self.cacheFrames.keys()) + self.p.terminate() + self.cacheFrames.clear() + print('dsdsdsdsdsds') + return + # if self.test.value: + # # print(self.playFrame.value, self.test.value) + # self.playFrame.value += self.test.value - 1 + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + # self.setPixmap(pixmap) + # # del frame + # # del pixmap + # # del qImg + # if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50: + # # # print(self.playFrame.value-self.tiems.value, 
self.playFrame.value-300) + # self.cacheFrames.pop(self.playFrame.value-51) + # return + if self.playFrame.value not in self.cacheFrames: + if len(self.cacheFrames.keys()) and not self.playend.value: + try: + if self.playFrame.value > self.cacheFrames.keys()[-1]: + self.playFrame.value = self.cacheFrames.keys()[0] + elif self.playFrame.value < self.cacheFrames.keys()[0]: + self.playFrame.value = self.cacheFrames.keys()[0] + except: + self.playFrame.value = self.tiems.value + + # print(self.playFrame.value, 'sssssssssssssss', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value) + return + if self.playFrame.value in self.cacheFrames: + try: + frame = self.cacheFrames[self.playFrame.value] + except: + return + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + self.setPixmap(pixmap) + # file_path = "%s_%s.jpg" % (self.playList[self.playindexing], self.playFrame.value) # 保存图片的文件路径 + # + # # 保存QImage对象为图片文件 + # qImg.save(file_path) + # del frame + # del pixmap + # del qImg + # print('key1') + if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) > 50 and self.playFrame.value-51 in self.cacheFrames.keys(): + # print(self.playFrame.value, self.cacheFrames.keys()) + self.cacheFrames.pop(self.cacheFrames.keys()[0]) + if self.playindexing.value == 0: + if self.playFrame.value > self.cacheInfo[self.playindexing.value]: + self.playindexing.value += 1 + elif self.playindexing.value == len(self.cacheInfo.keys())-1: + pass + else: + if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + self.playindexing.value += 1 + # print('z', 
sum([self.cacheInfo[i] for i in range(self.playindexing.value)])) + # elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + # print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)])) + # self.playindexing.value += 1 + if self.playindexing.value == 0: + # print('当前帧0', + # self.playindexing.value, self.playFrame.value, + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # self.playFrame.value) + self.n_playFrame.value = self.playFrame.value + dframe = self.playFrame.value + zframe = self.playFrame.value + else: + if self.playFrame.value < sum([self.cacheInfo[i] for i in range(self.playindexing.value)]): + pass + # print('当前帧点击', + # '\n播放',self.playindexing.value, + # '\n当前',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n总帧', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value + 1 + dframe = self.playFrame.value + 1 + zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1 + else: + # print('当前帧1', + # '\n播放',self.playindexing.value, + # '\n帧',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n当前',self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + zframe = self.playFrame.value + # if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))): + # print('当前帧1', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in 
range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # else: + # print('当前帧1', self.playindexing.value, self.playFrame.value, + # (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # print(self.playFrame.value) + # print('\n实际播放', self.playFrame.value, '\n播放',self.playindexing.value, '\n当前帧', dframe, '\n总的当前帧', zframe, '\n镜头帧', self.n_playFrame.value) + self.playFrame.value += 1 + # print(self.playFrame.value) + + + def generate_number1(self): + + s = Process(target=play_audio) + s.start() + + def get(self): + p = 1 + ps = 1 + for s, n in enumerate(self.playList): + cap = cv2.VideoCapture(n) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frameNum1 = frameNum + ps + self.cacheInfo[s] = frameNum + self.playInit[s] = [[1, frameNum], [ps, frameNum1]] + ps = frameNum1 + 1 + print(self.playInit.items()) + + def mousePressEvent(self, ev): + # self.timer.stop() + # if True: + # return + if ev.button() == Qt.LeftButton: + # # print("Left mouse button pressed") + # # i = random.randint(0, len(self.playList)-1) + # # self.select_ = i + # + # + # # # s = random.randint(0, self.cacheInfo[i]) + x = ev.pos().x() + frames = int(float(x) / self.width() * self.playInit[self.playInit.keys()[-1]][-1][-1]) + # print(frames) + # # self.select_status.value = 1 + # # print('select', i, ip, self.cacheInfo[i]) + # # # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames, + # # # self.cacheFrames.keys()) + # + # # frames = random.randint(0, self.playInit[3][1][1]) + print(frames) + try: + print(self.playInit[self.playInit.keys()[-1]][-1][-1]) + except: + pass + if get(frames, self.playInit): + sele = 
get(frames, self.playInit) + print(sele) + # if frames in self.cacheFrames: + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[frames] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # # 假设qImg是您的QImage对象 + # file_path = "%s_%s.jpg" % (sele[-1], frames) # 保存图片的文件路径 + # + # # 保存QImage对象为图片文件 + # qImg.save(file_path) + # # print('stop') + # + + self.select_status.value = 1 + self.select_ = sele[0] + self.select.value = sele[0] + self.tiems.value = sele[-1] -1 + self.playFrame.value = sele[-1] -1 + self.playend.value = 0 + self.playindexing.value = sele[0] + # print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value) + # # if not self.timer.isActive(): + # # self.timer.start() + time.sleep(2) + print(frames, sele[-1], self.cacheFrames.keys()) + if sele[-1]-1 in self.cacheFrames: + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + frame = self.cacheFrames[sele[-1]-1] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # 假设qImg是您的QImage对象 + print('保存',self.playList, self.select.value, self.playList[self.select.value]) + file_path = "%s_%s_%s.jpg" % (self.playList[self.select.value].split('/')[-1], sele[-1], frames) # 保存图片的文件路径 + + # 保存QImage对象为图片文件 + qImg.save(file_path) + + # # print("Left mouse button pressed") + # i = random.randint(0, len(self.playList) - 1) + # self.select_ = i + # # s = random.randint(0, self.cacheInfo[i]) + # x = ev.pos().x() + # ip = int(float(x) / self.width() * self.cacheInfo[i]) + # self.select_status.value = 1 + # print('select', i, ip, self.cacheInfo[i]) + # # print(len(self.cacheFrames.keys()), self.playFrame.value, 
self.playFrame.value in self.cacheFrames, + # # self.cacheFrames.keys()) + # self.select.value = i + # self.tiems.value = ip + # self.playFrame.value = ip + # self.playend.value = 0 + # self.playindexing.value = i + # print('select', i, ip, self.cacheInfo[i]) + # print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value) + # # if not self.timer.isActive(): + # # self.timer.start() + + elif ev.button() == Qt.RightButton: + # print("Right mouse button pressed") + if self.yy.timer.isActive(): + self.yy.timer.stop() + # print(self.playInit[3]) + + else: + # print('start') + self.yy.timer.start() + + def mouseMoveEvent(self, ev): + # self.yy.timer.stop() + # # x = ev.pos().x() + # # ip = int(float(x) / self.width() * self.cacheInfo[self.select_]) + # # self.select_status.value = 1 + # # print('select', self.select_, ip, self.cacheInfo[self.select_]) + # # self.tiems.value = ip - 1 + # # self.playFrame.value = ip - 1 + # # self.hua.value = 1 + # # print(self.cacheFrames.keys()) + # if self.playFrame.value in self.cacheFrames: + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + # self.setPixmap(pixmap) + super().mouseMoveEvent(ev) + + def mouseReleaseEvent(self, ev): + # self.hua.value = 0 + print(self.playList[self.select.value]) + cap = cv2.VideoCapture(self.playList[self.select.value]) + fps = cap.get(cv2.CAP_PROP_FPS) + self.yy.set(fps) + # time.sleep(1) + self.yy.timer.start() + super().mouseReleaseEvent(ev) + + def mouseDoubleClickEvent(self, a0): + if self.rod.value: + self.rod.value = 0 + else: + self.rod.value = 1 + super().mouseDoubleClickEvent(a0) + + def closeEvent(self, 
a0): + # self.yy.terminate() + # self.yy.deleteLater() + + # self.p.terminate() + self.p.join() + # self.p.close() + # self.p.kill() + + # self.yy.p.terminate() + self.yy.p.join() + # self.yy.p.close() + # self.yy.p.kill() + super().closeEvent(a0) + # sys.exit() + + +class Get(QThread): + num = pyqtSignal(int) + + def __init__(self, cache=None): + super(Get, self).__init__() + + self.cache = cache + + def run(self) -> None: + while True: + self.cache.value + + +if __name__ == '__main__': + freeze_support() + app = QApplication([]) + widget = Window() + widget.show() + # cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # print(frameNum) + app.exec_() + + +# +++++++++++++++++++++++++++++++++++++++++++ + +import random +import threading +import time +import cv2 +from PyQt5.QtGui import * +from PyQt5.QtCore import * +from PyQt5.QtWidgets import * +import sys +from multiprocessing import Process, Manager, Value, freeze_support +from PyQt5.QtCore import QThread, pyqtSignal +from PyQt5.QtWidgets import QApplication +import pyaudio +from moviepy.editor import VideoFileClip + + +def get(frame, data): + for key, value in data.items(): + if frame in list(range(value[1][0], value[1][1] + 1)): + if key == 0: + return key, value[0], frame - value[1][0] + 1 + else: + return key, value[0], frame - value[1][0] + +def play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0): + while True: + if path: + video = VideoFileClip(path) + audio = video.audio + if not audio: + au_if.value = 0 + continue + cap = cv2.VideoCapture(path) + fps = cap.get(cv2.CAP_PROP_FPS) + else: + video = VideoFileClip("[J2] 銀魂 002.rmvb") + audio = video.audio + if not audio: + au_if.value = 0 + continue + cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb") + fps = cap.get(cv2.CAP_PROP_FPS) + + + p = pyaudio.PyAudio() + rat = audio.fps + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=rat, + 
output=True) + # 计算开始位置的帧数 + + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + fr = int(rat/fps) + if (i % fr) == 0: + if i == 0: + # print('event', i) + event.value = 1 + time.sleep(1.5) + else: + time_.value = i + frame.value += 1 + if (i % rat) == 0: + # print('miao', s) + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +class T(QThread): + frame = pyqtSignal() + def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None): + super().__init__() + + self.cache = cache + self.n_playframe = n_playframe + self.frames1 = frames1 + self.playframe = playframe + self.time = time + + def run(self): + while True: + # print('bububuubub') + if self.playframe.value in self.cache.keys(): + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + else: + if self.playframe.value < self.frames1.value - 1: + self.playframe.value += 1 + elif self.playframe.value > self.frames1.value: + self.playframe.value -= 1 + # self.time.value = self.playframe.value + # for i in range(2): + # pass + # # print(1111, self.n_playframe.value, self.frames1.value) + # # self.frame.emit() + # self.n_playframe.value = self.frames1.value + + +class PlayAudio(QThread): + frame = pyqtSignal() + + def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0): + super().__init__() + + self.cache = cache + self.playlist = playlist + self.playframe = playframe + self.n_playframe = n_playframe + self.time = time + self.select = select + self.test = test + self.start_time = start_time + self.timer = QTimer() + cap 
= cv2.VideoCapture(self.playlist[self.select.value]) + self.fps = cap.get(cv2.CAP_PROP_FPS) + print(self.fps) + self.timer.setInterval(1000 // self.fps) + self.timer.timeout.connect(self.Es) + # self.timer.start() + self.audio = None + self.stream = None + self.buffer = [] + self.sss = False + self.au_event = Manager().Value("i", 0) + self.frames = Manager().Value("i", 0) + self.frames1 = Manager().Value("i", 0) + self.frames2 = 0 + self.au_time = Manager().Value("i", 0) + self.if_au = Manager().Value("i", 1) + if not self.sss: + self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au)) + self.p.start() + # + # self.p = threading.Thread(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au)) + # self.p.start() + + self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, self.cache) + self.t.frame.connect(self.Es) + while True: + if self.sss: + self.timer.start() + break + if self.au_event.value: + # self.timer.start() + self.frame.emit() + self.playframe.value += 1 + self.t.start() + self.au_event.value = 0 + break + elif self.if_au.value == 0: + self.timer.start() + break + + def set(self, fps): + # self.timer.stop() + self.timer.setInterval(1000 // int(fps)) + # self.timer.start() + + # def run(self): + # index = None + # p = None + # if self.sss: + # return + # # while True: + # # # print(self.select.value) + # # # print(self.playlist[self.select.value]) + # # if index != self.select.value: + # # if self.stream: + # # self.stream.close() + # # self.buffer = [] + # # self.audio = None + # # self.stream = None + # # # if p: + # # # print('clor') + # # # p.terminate() + # # + # # index = self.select.value + # # video = VideoFileClip(self.playlist[self.select.value]) + # # self.audio = video.audio + # # if self.audio: + # # rat = self.audio.fps + # # + # # p = pyaudio.PyAudio() + # # # 创建输出流 + # # self.stream = 
p.open(format=pyaudio.paFloat32, + # # channels= self.audio.nchannels, + # # rate=rat, + # # output=True) + # # # print(playlist[select.value]) + # # # 计算开始位置的帧数 + # # start_frame = self.start_time + # # + # # # print(rat, 'rat') + # # # 设置缓冲区大小 + # # buffer_size = 1024 + # # else: + # # # print('play') + # # + # # if self.audio: + # # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)): + # # # print(i) + # # fr = int(rat/self.fps) + # # if (i % fr) == 0: + # # pass + # # # print('dddddddddddddd') + # # # self.frame.emit() + # # # print(self.cou) + # # if i >= start_frame: + # # self.buffer.append(chunk.astype('float32').tostring()) + # # if len(self.buffer) >= buffer_size: + # # self.stream.write(b''.join(self.buffer)) + # # self.buffer = [] + # # # print(stream.get_time()) + # # # 写入剩余数据 + # # if self.buffer: + # # self.stream.write(b''.join(self.buffer)) + # # p.terminate() + + def run(self): + while True: + if self.frames1.value != self.frames.value: + self.frames1.value = self.frames.value + self.playframe.value += 1 + # print('相差', self.frames.value, self.playframe.value) + + # print(self.frames1) + self.frame.emit() + # print('run') + # self.playframe. 
+ + def Es(self): + pass + # print('es') + # self.terminate() + # while True: + # if abs(self.frames1 - self.n_playframe.value): + # for i in range(abs(self.frames1 - self.n_playframe.value)): + # print(1111) + self.frame.emit() + # if not self.if_au.value: + # self.frame.emit() + # return + # self.frames2 += 1 + # if self.frames1 != self.frames2: + # print(self.frames1, self.frames2, self.frames.value, self.au_time) + # self.frame.emit() + # if self.audio: + # return + # if self.frames1 != self.frames.value: + # return + # self.frame.emit() + # while True: + + +def play(playFrame, buff): + p = pyaudio.PyAudio() + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + while True: + # # print(playFrame.value, 'bdddd') + # if playFrame.value%25 == 0: + stream.write(b''.join(buff[int(playFrame.value)])) + # # print(self.playFrame.value, dir(self.buff), self.buff.keys()) + + +def play_audios(playlist, select, test, start_time=0): + + index = None + p = None + while True: + + # print(playlist[select.value]) + if index != select.value: + if p: + p.terminate() + index = select.value + video = VideoFileClip(playlist[select.value]) + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # print(playlist[select.value]) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + else: + # print('play') + + if audio: + o = 0 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if (i%1764) == 0: + test.value = o + o += 1 + # print(o) + if i >= start_frame: + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def play_audio(path=None, start_time=0): + if path: + video = VideoFileClip(path) + else: + 
video = VideoFileClip("[J2] 銀魂 002.rmvb") + audio = video.audio + p = pyaudio.PyAudio() + + # 创建输出流 + stream = p.open(format=pyaudio.paFloat32, + channels=2, + rate=44100, + output=True) + # 计算开始位置的帧数 + start_frame = start_time + # 设置缓冲区大小 + buffer_size = 1024 + buffer = [] + s = 0 + # 逐帧播放音频 + for i, chunk in enumerate(audio.iter_frames(with_times=False)): + # print(i) + if i >= start_frame: + if i % (44100 / 25) == 0: + print(s, 'miao') + s += 1 + buffer.append(chunk.astype('float32').tostring()) + if len(buffer) >= buffer_size: + stream.write(b''.join(buffer)) + buffer = [] + # print(stream.get_time()) + # 写入剩余数据 + if buffer: + stream.write(b''.join(buffer)) + p.terminate() + + +def process_frame(playlist, playFrame, playindex, select, select_status, times, cache, frame_dict, playend, rod, hua): + # playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # print(playlist) + f = 1 + op = times.value + mov = select.value + while True: + if mov == (len(playlist)-0): + if not playend.value: + select_status.value = 0 + mov = select.value + op = times.value + f = times.value + cap.release() + frame_dict.clear() + continue + playindex.value = mov + # print(select.value, mov) + # if select.value != mov and select_status.value: + # cap.release() + # mov = select.value + # frame_dict.clear() + # # select_status.value = 0 + # if time.value != op: + # cap.release() + # op = time.value + # f = time.value + # playFrame.value = time.value + # frame_dict.clear() + # print(time.value, op, playFrame.value, len(frame_dict), mov) + # print(mov, select.value, playlist[mov], frame_dict.keys()) + # print('op' ,op, playindex.value) + # for s, n in enumerate(List[:]): + # if frams != 0: + # # print('dsds') + # for j in range(f, frams): + # # print(j ,'lllllll') + # frame_dict[j] = None + cap = cv2.VideoCapture(playlist[mov]) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + cap.set(cv2.CAP_PROP_POS_FRAMES, op) + + # print('cap', frameNum) + while cap.isOpened(): + if rod.value: + 
continue + # # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd') + if select_status.value and hua.value == 0: + # time.sleep(3) + # print('sssss', select.value, time.value, select_status.value) + select_status.value = 0 + mov = select.value + op = times.value + f = times.value + cap.release() + frame_dict.clear() + break + # ret, frame = cap.read() + # if ret: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # if frameNum < 220: + # ret, frame = cap.read() + # if ret: + # # if abs(f - playFrame.value) == 100: + # # if playFrame.value > 50: + # # if abs(f - playFrame.value) == 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # # else: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # cache.value = len(frame_dict) + playFrame.value + # # elif f < 100: + # # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # # frame_dict[f] = frame + # # f += 1 + # else: + # mov += 1 + # op = 0 + # break + + if len(frame_dict) < 120: + ret, frame = cap.read() + if ret: + + # if abs(f - playFrame.value) == 100: + # if playFrame.value > 50: + # if abs(f - playFrame.value) == 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + frame_dict[f] = frame + f += 1 + cache.value = len(frame_dict) + playFrame.value + # print('duqu') + # print('ret', ret, f, len(frame_dict)) + # elif f < 100: + # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + else: + # print('else', op, playindex.value) + if mov == (len(playlist)-1): + playend.value = 1 + mov += 1 + op = 0 + break + else: + continue + + + + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # frams = frameNum + # info_dict[s] = frameNum + # for i in range(frameNum): + # while True: + # if abs(f - playFrame.value) == 100: + # ret, frame = cap.read() + # if ret: + # 
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + # frame_dict[f] = frame + # f += 1 + # else: + # continue + + +class Window(QLabel): + def __init__(self): + super(Window, self).__init__() + + + self.select_ = None + # time.sleep(3) + manager = Manager() + self.cacheFrames = manager.dict() + manager = Manager() + self.cacheInfo = manager.dict() + manager = Manager() + self.playFrame = manager.Value('i', 1) + self.n_playFrame = manager.Value('i', 1) + self.playindex = manager.Value('i', 0) + self.playindexing = manager.Value('i', 0) + self.select = manager.Value('i', 0) + self.select_status = manager.Value('i', 0) + self.tiems = manager.Value('i', 0) + self.cache = manager.Value('i', 0) + self.playList = manager.list() + self.playend = manager.Value('i', 0) + self.rod = manager.Value('i', 0) + self.test = manager.Value('i', 0) + self.hua = manager.Value('i', 0) + self.playInit = manager.dict() + import os + for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i)) + print(self.playList) + # self.playList.pop(0) + # self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb') + # manager = Manager() + # self.buff = manager.dict() + # self.generate_number1() + + # self.cacheInfo = {} + self.setAlignment(Qt.AlignCenter) + self.resize(1280, 720) + self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb") + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + fps = self.cap.get(cv2.CAP_PROP_FPS) + self.get() + # self.timer = QTimer() + # self.timer1 = QTimer() + # self.timer.setInterval(1000 // fps) # 设置定时器间隔,以确保每秒触发24次 + # self.timer.timeout.connect(self.generate_number) # 连接定时器的timeout信号到生成数字的槽函数 + + # time.sleep(5) + for i in range(1): + self.p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua)) + 
self.p.start() + # self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test)) + + # time.sleep(2) + # self.timer.start() + # self.ps.start() + # self.timer1.start() + # self.sss = Get(self.cache) + # self.sss.start() + + self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames) + self.yy.frame.connect(self.generate_number) + self.yy.start() + + @pyqtSlot() + def generate_number(self): + # print(self.playend.value, self.playFrame.value , self.playInit[self.playInit.keys()[-1]][-1][-1]+1) + # print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() ) + if self.playFrame.value >= self.playInit[self.playInit.keys()[-1]][-1][-1]+1: + # print('end', self.playFrame.value, self.cacheFrames.keys()) + self.p.terminate() + self.cacheFrames.clear() + print('dsdsdsdsdsds') + return + # if self.test.value: + # # print(self.playFrame.value, self.test.value) + # self.playFrame.value += self.test.value - 1 + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + # self.setPixmap(pixmap) + # # del frame + # # del pixmap + # # del qImg + # if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50: + # # # print(self.playFrame.value-self.tiems.value, self.playFrame.value-300) + # self.cacheFrames.pop(self.playFrame.value-51) + # return + if self.playFrame.value not in self.cacheFrames: + if len(self.cacheFrames.keys()) and not self.playend.value: + try: + if self.playFrame.value > self.cacheFrames.keys()[-1]: + 
self.playFrame.value = self.cacheFrames.keys()[0] + elif self.playFrame.value < self.cacheFrames.keys()[0]: + self.playFrame.value = self.cacheFrames.keys()[0] + except: + self.playFrame.value = self.tiems.value + + # print(self.playFrame.value, 'sssssssssssssss', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value) + return + if self.playFrame.value in self.cacheFrames: + try: + frame = self.cacheFrames[self.playFrame.value] + except: + return + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + self.setPixmap(pixmap) + # file_path = "%s_%s.jpg" % (self.playList[self.playindexing], self.playFrame.value) # 保存图片的文件路径 + # + # # 保存QImage对象为图片文件 + # qImg.save(file_path) + # del frame + # del pixmap + # del qImg + # print('key1') + if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) > 50 and self.playFrame.value-51 in self.cacheFrames.keys(): + # print(self.playFrame.value, self.cacheFrames.keys()) + self.cacheFrames.pop(self.cacheFrames.keys()[0]) + if self.playindexing.value == 0: + if self.playFrame.value > self.cacheInfo[self.playindexing.value]: + self.playindexing.value += 1 + elif self.playindexing.value == len(self.cacheInfo.keys())-1: + pass + else: + if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + self.playindexing.value += 1 + # print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value)])) + # elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]): + # print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)])) + # 
self.playindexing.value += 1 + if self.playindexing.value == 0: + # print('当前帧0', + # self.playindexing.value, self.playFrame.value, + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # self.playFrame.value) + self.n_playFrame.value = self.playFrame.value + dframe = self.playFrame.value + zframe = self.playFrame.value + else: + if self.playFrame.value < sum([self.cacheInfo[i] for i in range(self.playindexing.value)]): + pass + # print('当前帧点击', + # '\n播放',self.playindexing.value, + # '\n当前',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n总帧', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value + 1 + dframe = self.playFrame.value + 1 + zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1 + else: + # print('当前帧1', + # '\n播放',self.playindexing.value, + # '\n帧',self.playFrame.value, + # '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # '\n当前',self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + # ) + self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + zframe = self.playFrame.value + # if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))): + # print('当前帧1', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # else: + # print('当前帧1', self.playindexing.value, self.playFrame.value, + # (sum([self.cacheInfo[i] for i in 
range(self.playindexing.value)])), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), + # sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])))) + # print(self.playFrame.value) + # print('\n实际播放', self.playFrame.value, '\n播放',self.playindexing.value, '\n当前帧', dframe, '\n总的当前帧', zframe, '\n镜头帧', self.n_playFrame.value) + self.playFrame.value += 1 + # print(self.playFrame.value) + + + def generate_number1(self): + + s = Process(target=play_audio) + s.start() + + def get(self): + p = 1 + ps = 1 + for s, n in enumerate(self.playList): + cap = cv2.VideoCapture(n) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + frameNum1 = frameNum + ps + self.cacheInfo[s] = frameNum + self.playInit[s] = [[1, frameNum], [ps, frameNum1]] + ps = frameNum1 + 1 + print(self.playInit.items()) + + def mousePressEvent(self, ev): + # self.timer.stop() + # if True: + # return + if ev.button() == Qt.LeftButton: + # # print("Left mouse button pressed") + # # i = random.randint(0, len(self.playList)-1) + # # self.select_ = i + # + # + # # # s = random.randint(0, self.cacheInfo[i]) + x = ev.pos().x() + frames = int(float(x) / self.width() * self.playInit[self.playInit.keys()[-1]][-1][-1]) + # print(frames) + # # self.select_status.value = 1 + # # print('select', i, ip, self.cacheInfo[i]) + # # # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames, + # # # self.cacheFrames.keys()) + # + # # frames = random.randint(0, self.playInit[3][1][1]) + print(frames) + try: + print(self.playInit[self.playInit.keys()[-1]][-1][-1]) + except: + pass + if get(frames, self.playInit): + sele = get(frames, self.playInit) + print(sele) + # if frames in self.cacheFrames: + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[frames] + # self.h, self.w, ch = frame.shape + # 
bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # # 假设qImg是您的QImage对象 + # file_path = "%s_%s.jpg" % (sele[-1], frames) # 保存图片的文件路径 + # + # # 保存QImage对象为图片文件 + # qImg.save(file_path) + # # print('stop') + # + + self.select_status.value = 1 + self.select_ = sele[0] + self.select.value = sele[0] + self.tiems.value = sele[-1] -1 + self.playFrame.value = sele[-1] -1 + self.playend.value = 0 + self.playindexing.value = sele[0] + # print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value) + # # if not self.timer.isActive(): + # # self.timer.start() + time.sleep(2) + print(frames, sele[-1], self.cacheFrames.keys()) + if sele[-1]-1 in self.cacheFrames: + # if self.counter/1000 == 0: + # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + frame = self.cacheFrames[sele[-1]-1] + self.h, self.w, ch = frame.shape + bytesPerLine = ch * self.w + # frame = cv2.resize(frame, (1280, 720)) + qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # 假设qImg是您的QImage对象 + print('保存',self.playList, self.select.value, self.playList[self.select.value]) + file_path = "%s_%s_%s.jpg" % (self.playList[self.select.value].split('/')[-1], sele[-1], frames) # 保存图片的文件路径 + + # 保存QImage对象为图片文件 + qImg.save(file_path) + + # # print("Left mouse button pressed") + # i = random.randint(0, len(self.playList) - 1) + # self.select_ = i + # # s = random.randint(0, self.cacheInfo[i]) + # x = ev.pos().x() + # ip = int(float(x) / self.width() * self.cacheInfo[i]) + # self.select_status.value = 1 + # print('select', i, ip, self.cacheInfo[i]) + # # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames, + # # self.cacheFrames.keys()) + # self.select.value = i + # self.tiems.value = ip + # self.playFrame.value = ip + # self.playend.value = 0 + # self.playindexing.value = i + # print('select', i, ip, 
self.cacheInfo[i]) + # print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value) + # # if not self.timer.isActive(): + # # self.timer.start() + + elif ev.button() == Qt.RightButton: + # print("Right mouse button pressed") + if self.yy.timer.isActive(): + self.yy.timer.stop() + # print(self.playInit[3]) + + else: + # print('start') + self.yy.timer.start() + + def mouseMoveEvent(self, ev): + # self.yy.timer.stop() + # # x = ev.pos().x() + # # ip = int(float(x) / self.width() * self.cacheInfo[self.select_]) + # # self.select_status.value = 1 + # # print('select', self.select_, ip, self.cacheInfo[self.select_]) + # # self.tiems.value = ip - 1 + # # self.playFrame.value = ip - 1 + # # self.hua.value = 1 + # # print(self.cacheFrames.keys()) + # if self.playFrame.value in self.cacheFrames: + # # if self.counter/1000 == 0: + # # # print(len(self.cacheFrames), 'len(self.cacheFrames)') + # frame = self.cacheFrames[self.playFrame.value] + # self.h, self.w, ch = frame.shape + # bytesPerLine = ch * self.w + # # frame = cv2.resize(frame, (1280, 720)) + # qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888) + # pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height()) + # self.setPixmap(pixmap) + super().mouseMoveEvent(ev) + + def mouseReleaseEvent(self, ev): + # self.hua.value = 0 + print(self.playList[self.select.value]) + cap = cv2.VideoCapture(self.playList[self.select.value]) + fps = cap.get(cv2.CAP_PROP_FPS) + self.yy.set(fps) + # time.sleep(1) + self.yy.timer.start() + super().mouseReleaseEvent(ev) + + def mouseDoubleClickEvent(self, a0): + if self.rod.value: + self.rod.value = 0 + else: + self.rod.value = 1 + super().mouseDoubleClickEvent(a0) + + def closeEvent(self, a0): + # self.yy.terminate() + # self.yy.deleteLater() + + # self.p.terminate() + self.p.join() + # self.p.close() + # self.p.kill() + + # self.yy.p.terminate() + self.yy.p.join() + # self.yy.p.close() + # self.yy.p.kill() + 
super().closeEvent(a0) + # sys.exit() + + +class Get(QThread): + num = pyqtSignal(int) + + def __init__(self, cache=None): + super(Get, self).__init__() + + self.cache = cache + + def run(self) -> None: + while True: + self.cache.value + + +if __name__ == '__main__': + freeze_support() + app = QApplication([]) + widget = Window() + widget.show() + # cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb') + # frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # print(frameNum) + app.exec_() + +# +++++++++++++++++++++++++++++++++++++++++++++++ + + +# coding: utf-8 + +from multiprocessing import Manager, Process +import cv2 +from PyQt5.QtCore import * +from PyQt5.QtWidgets import QApplication + +class PlayInit(QThread): + def __init__(self, ): + super().__init__() + + self.play_list_data = Manager().dict() + self.playInfo = Manager().dict() + self.play_list = None + self.update = False + self.end = False + + def set(self, play_list): + self.play_list = play_list + self.play_list_data.clear() + self.update = True + + def run(self): + while True: + if self.play_list: + if self.update: + for s, i in enumerate(self.play_list): + # print(i) + self.p = Process(target=self.init, args=(s, i,)) + self.p.start() + self.update = False + while True: + if len(self.play_list_data.keys()) == len(self.play_list): + # ps = 1 + # for i in sorted(self.play_list_data.keys()): + # # print(i, '0000') + # if i == 0: + # # print('0', i) + # self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [1, self.play_list_data[i][0][1]]] + # else: + # # print('dd', i-1, self.play_list_data[i-1]) + # frameNum1 = self.play_list_data[i-1][0][1] + 1 + # self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [frameNum1, frameNum1+self.play_list_data[i][0][1]]] + # ps = frameNum1 + 1 + # # print(self.play_list_data[i]) + self.end = True + # print('ssssssss') + break + + def init(self, index, play): + # print(index) + cap = 
cv2.VideoCapture(play) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.playInfo[index] = frameNum + # self.cacheInfo[s] = frameNum + self.play_list_data[index] = [[1, frameNum]] + while True: + try: + self.play_list_data[0] + if index == 0: + # print('0', index) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [1, self.play_list_data[index][0][1]]] + else: + if index == 1: + # print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)])) + frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1 + # print(frameNum1) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [frameNum1, frameNum1 + self.play_list_data[index][0][1]]] + else: + # print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)])) + frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1 + # print(frameNum1) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [frameNum1+1, frameNum1 + self.play_list_data[index][0][1]]] + break + except: + # print(index) + continue + + +if __name__ == '__main__': + import sys + import os + app = QApplication([]) + playList = [] + for s, i in enumerate(os.listdir('/home/jcen/Videos')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + playList.append(os.path.join(r'/home/jcen/Videos', i)) + s = PlayInit() + s.start() + s.set(playList) + while True: + if s.end: + print(s.play_list_data.items()) + print(s.playInfo.items()) + break + # print(playList) + app.exec_() +# coding: utf-8 + +from multiprocessing import Manager, Process +import cv2 +from PyQt5.QtCore import * +from PyQt5.QtWidgets import QApplication + +class PlayInit(QThread): + def __init__(self, ): + super().__init__() + + self.play_list_data = 
Manager().dict() + self.playInfo = Manager().dict() + self.play_list = None + self.update = False + self.end = False + + def set(self, play_list): + self.play_list = play_list + self.play_list_data.clear() + self.update = True + + def run(self): + while True: + if self.play_list: + if self.update: + for s, i in enumerate(self.play_list): + # print(i) + self.p = Process(target=self.init, args=(s, i,)) + self.p.start() + self.update = False + while True: + if len(self.play_list_data.keys()) == len(self.play_list): + # ps = 1 + # for i in sorted(self.play_list_data.keys()): + # # print(i, '0000') + # if i == 0: + # # print('0', i) + # self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [1, self.play_list_data[i][0][1]]] + # else: + # # print('dd', i-1, self.play_list_data[i-1]) + # frameNum1 = self.play_list_data[i-1][0][1] + 1 + # self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [frameNum1, frameNum1+self.play_list_data[i][0][1]]] + # ps = frameNum1 + 1 + # # print(self.play_list_data[i]) + self.end = True + # print('ssssssss') + break + + def init(self, index, play): + # print(index) + cap = cv2.VideoCapture(play) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.playInfo[index] = frameNum + # self.cacheInfo[s] = frameNum + self.play_list_data[index] = [[1, frameNum]] + while True: + try: + self.play_list_data[0] + if index == 0: + # print('0', index) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [1, self.play_list_data[index][0][1]]] + else: + if index == 1: + # print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)])) + frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1 + # print(frameNum1) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [frameNum1, frameNum1 + 
self.play_list_data[index][0][1]]] + else: + # print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)])) + frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1 + # print(frameNum1) + self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]], + [frameNum1+1, frameNum1 + self.play_list_data[index][0][1]]] + break + except: + # print(index) + continue + + +if __name__ == '__main__': + import sys + import os + app = QApplication([]) + playList = [] + for s, i in enumerate(os.listdir('/home/jcen/Videos')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + playList.append(os.path.join(r'/home/jcen/Videos', i)) + s = PlayInit() + s.start() + s.set(playList) + while True: + if s.end: + print(s.play_list_data.items()) + print(s.playInfo.items()) + break + # print(playList) + app.exec_() + + +# ++++++++++++++++++++++++++++ + + + +# coding: utf-8 +import time +from multiprocessing import Manager, Process +import cv2 +from PyQt5.QtCore import * +from PyQt5.QtWidgets import QApplication + + +class PlayInit(QThread): + frame = pyqtSignal() + + def __init__(self, ): + super().__init__() + + self.update = False + self.end = False + + manager = Manager() + self.playList = manager.list() + self.playList_data = manager.dict() + self.playInfo = manager.dict() + self.playCache = manager.dict() + + self.playFrame = manager.Value('i', 1) + self.playIndex = manager.Value('i', 0) + self.playFps = manager.Value('i', 0) + self.playListFps = manager.dict() + + self.count = manager.Value('i', 0) + self.run_count = manager.Value('i', 0) + self.run_object = {} + + def set(self, playList): + for i in playList: + self.playList.append(i) + self.playList_data.clear() + self.update = True + + def set_fps(self, fps): + self.playFps.value = fps + + def set_run_count(self, i): + self.run_count.value = i + + def run(self): + while True: + if self.playList: + if 
self.update: + for s, i in enumerate(self.playList): + self.p = Process(target=self.init, args=(s, i, self.count, self.playListFps)) + self.p.start() + self.update = False + while True: + if len(self.playList_data.keys()) == len(self.playList): + self.end = True + break + if self.end: + interval = 1 / 24.0 + while True: + start_time = time.time() + self.frame.emit() + elapsed_time = time.time() - start_time + wait_time = interval - elapsed_time + if wait_time > 0: + time.sleep(wait_time) + + def init(self, index, play, count, fps): + count.value += 1 + cap = cv2.VideoCapture(play) + frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + Fps = cap.get(cv2.CAP_PROP_FPS) + fps[index] = Fps + self.playInfo[index] = frameNum + self.playList_data[index] = [[1, frameNum]] + while True: + try: + self.playList_data[0] + if index == 0: + self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]], + [1, self.playList_data[index][0][1]]] + else: + if index == 1: + frameNum1 = sum([self.playList_data[i][0][1] for i in range(index)]) + 1 + self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]], + [frameNum1, frameNum1 + self.playList_data[index][0][1]]] + else: + frameNum1 = sum([self.playList_data[i][0][1] for i in range(index)]) + 1 + self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]], + [frameNum1+1, frameNum1 + self.playList_data[index][0][1]]] + break + except: + continue + # while True: + # if count.value - index > count.value - run_count.value: + # # print(index, '\n') + # ret, frame = cap.read() + # if ret: + # # print(ret, index, '\n') + # continue + + def process_frame(self, run_count): + for i in range(run_count): + p = Process(target=self.run_, args=(i,)) + self.run_object[i] = p + p.start() + + time.sleep(5) + self.run_object[0].terminate() + self.run_object[0].join() + + def run_(self, i): + while True: + print( '\n=========', i, '=========\n') + 
continue + + +if __name__ == '__main__': + + import sys + import os + app = QApplication([]) + playList = [] + for s, i in enumerate(os.listdir('/home/jcen/Videos')): + if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']: + playList.append(os.path.join(r'/home/jcen/Videos', i)) + s = PlayInit() + s.start() + s.set(playList) + # s.process_frame(1) + while True: + if s.end: + # ret, frame = s.cap[0].read() + # if ret: + # print(ret) + print(s.playList_data.items()) + print(s.playInfo.items(), s.count.value, s.playListFps.items()) + break + # print(playList) + app.exec_() + + + diff --git a/lesson1/list拖拽信号.py b/lesson1/list拖拽信号.py new file mode 100644 index 0000000..aa2f796 --- /dev/null +++ b/lesson1/list拖拽信号.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +import sys +from PyQt5.QtWidgets import QApplication, QWidget, QListWidget, QListWidgetItem, QLabel +from PyQt5.QtCore import Qt, QMimeData +from PyQt5.QtGui import QDrag + + +class DragLabel(QLabel): + def __init__(self, text): + super().__init__(text) + + def mousePressEvent(self, event): + if event.button() == Qt.LeftButton: + drag = QDrag(self) + mime_data = QMimeData() + mime_data.setText(self.text()) + drag.setMimeData(mime_data) + drag.exec_(Qt.MoveAction) + + +class ListWidget(QListWidget): + def __init__(self): + super().__init__() + self.setAcceptDrops(True) + self.setDragEnabled(True) + self.setDragDropMode(QListWidget.InternalMove) + + for i in range(10): + item = QListWidgetItem(self) + label = DragLabel(f'Label {i}') + item.setSizeHint(label.sizeHint()) + self.setItemWidget(item, label) + + def dragEnterEvent(self, event): + if event.mimeData().hasText(): + event.accept() + else: + event.ignore() + + def dropEvent(self, event): + text = event.mimeData().text() + item = QListWidgetItem(self) + label = DragLabel(text) + item.setSizeHint(label.sizeHint()) + self.setItemWidget(item, label) + + +class MainWindow(QWidget): + def __init__(self): + super().__init__() + self.init_ui() + + def init_ui(self): + 
list_widget1 = ListWidget() + list_widget2 = ListWidget() + + self.setGeometry(300, 300, 500, 200) + self.setWindowTitle('Drag and Drop Example') + + layout = QHBoxLayout() + layout.addWidget(list_widget1) + layout.addWidget(list_widget2) + self.setLayout(layout) + + self.show() + + +if __name__ == '__main__': + app = QApplication(sys.argv) + main_window = MainWindow() + sys.exit(app.exec_()) \ No newline at end of file diff --git a/lesson1/maya_qt.py b/lesson1/maya_qt.py new file mode 100644 index 0000000..1910e68 --- /dev/null +++ b/lesson1/maya_qt.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +from maya import OpenMaya, mel, cmds +import pymel.core as pm +import pymel.core as pm +from maya import cmds, mel, utils, OpenMaya + +import os +import sys +from PySide6.QtCore import Qt, QByteArray +from PySide6.QtWidgets import QApplication, QMainWindow, QDockWidget, QListWidget, QTextEdit, QPushButton, QWidget +from PySide6.QtCore import QSettings, QDataStream + + +class MainWindow(QMainWindow): + def __init__(self, parent=None): + super(MainWindow, self).__init__(parent) + + + self.setObjectName('MainWindow') + b = QPushButton('此为中央控件,点击保存布局') + b.clicked.connect(self.save_layout) + self.setCentralWidget(b) + + self.setWindowTitle('Dock 例子') + self.load_layout() + def save_layout(self): + settings = QSettings(r"C:\Users\Jcen\PycharmProjects\pythonProject\settings.ini", QSettings.IniFormat) + settings.setValue("geometry", self.saveGeometry()) + settings.setValue("state", self.saveState()) + + settings.beginGroup("dockWidgets") + dock_widgets = self.findChildren(QDockWidget) + for dock_widget in dock_widgets: + settings.setValue("%s%s" % (dock_widget.windowTitle(), dock_widget.objectName()), dock_widget.saveGeometry()) + + def load_layout(self): + settings = QSettings(r"C:\Users\Jcen\PycharmProjects\pythonProject\settings.ini", QSettings.IniFormat) + settings.beginGroup("dockWidgets") + for key in settings.allKeys(): + self.items = QDockWidget('%s' % (key[:-1]), self) 
+ self.items.setObjectName('%s' % (key[-1:])) + self.addDockWidget(Qt.RightDockWidgetArea, self.items) + + settings.endGroup() + self.restoreGeometry(settings.value("geometry")) + self.restoreState(settings.value("state")) + + +def create_tab( + dock_name, + widget, + label='WidgetLabel', + dockTab='Channel Box / Layer Editor' +): + cmds.workspaceControl(dock_name, uiScript='', label=label) + cmds.control(str(widget.objectName()), e=True, p=dock_name) + tab_result = mel.eval('getUIComponentDockControl("{}", false)'.format(dockTab)) + cmds.workspaceControl(dock_name, e=True, tabToControl=(tab_result, -1)) + +if OpenMaya.MGlobal.mayaState() == OpenMaya.MGlobal.kInteractive: + + # create project manager tab + mayaProjectManager = MainWindow() + create_tab( + 'ProjectManagerDock1201111', + mayaProjectManager, + label='Project Manager' + ) diff --git a/lesson1/pyqt下重定向.py b/lesson1/pyqt下重定向.py new file mode 100644 index 0000000..1c51631 --- /dev/null +++ b/lesson1/pyqt下重定向.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +import sys +import traceback +from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QTextEdit +from PyQt5.QtCore import Qt + +class MyWindow(QMainWindow): + def __init__(self): + super().__init__() + + self.setWindowTitle("Error Handling Example") + self.setGeometry(100, 100, 800, 600) + + # 创建布局和文本编辑部件 + layout = QVBoxLayout() + self.text_edit = QTextEdit() + + # 将文本编辑部件添加到布局中 + layout.addWidget(self.text_edit) + + # 创建一个小部件并将布局设置为其布局 + widget = QWidget() + widget.setLayout(layout) + + # 将小部件设置为主窗口的中央部件 + self.setCentralWidget(widget) + + # 重定向错误输出到文本编辑部件 + sys.excepthook = self.handle_exception + + # 设置文本编辑部件为只读 + self.text_edit.setReadOnly(True) + + def handle_exception(self, exc_type, exc_value, exc_traceback): + # 将错误信息输出到文本编辑部件 + error_message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + self.text_edit.append(error_message) + +if __name__ == "__main__": + app = QApplication([]) + window = 
MyWindow()
    window.show()

    # Deliberately trigger ZeroDivisionError to demonstrate the redirected
    # excepthook. NOTE(review): the uncaught exception ends the script before
    # app.exec_() runs, so the event loop never starts — confirm intent.
    print(1 / 0)

    app.exec_()

# --- lesson1/qt6截屏.py ---
# -*- coding: utf-8 -*-
# coding=utf-8
import os
import sys
from datetime import datetime
from PyQt6 import QtCore, QtGui
from PyQt6.QtWidgets import *
from PyQt6.QtCore import QRectF, QRect, QSizeF, QPointF, QPoint, QMarginsF
from PyQt6.QtGui import QPainter, QPen, QPixmap, QColor, QAction, QIcon


class TextInputWidget(QTextEdit):
    '''Text input box shown inside the screenshot area.

    `god` is the owning ScreenShotWidget: it supplies the transparent color,
    the toolbar (current color/font) and the drawing-state flags.
    '''

    def __init__(self, god=None):
        super().__init__(god)
        self.god = god
        # Transparent background via palette.
        # self.setStyleSheet("QTextEdit{background-color: transparent;}")
        palette = self.palette()
        palette.setBrush(QtGui.QPalette.ColorRole.Base, self.god.color_transparent)
        self.setPalette(palette)
        self.setTextColor(self.god.toolbar.curColor())
        self.setCurrentFont(self.god.toolbar.curFont())
        self._doc = self.document()  # QTextDocument
        self.textChanged.connect(self.adjustSizeByContent)
        self.adjustSizeByContent()  # start out one line high
        self.hide()

    def adjustSizeByContent(self, margin=30):
        '''Clamp width to the capture area and fit height to the document so
        no scrollbar ever appears.
        NOTE(review): `margin` is unused — looks vestigial; confirm before removing.'''
        self._doc.setTextWidth(self.viewport().width())
        margins = self.contentsMargins()
        h = int(self._doc.size().height() + margins.top() + margins.bottom())
        self.setFixedHeight(h)

    def beginNewInput(self, pos, endPointF):
        '''Start a brand-new text input bounded by pos..endPointF.'''
        self._maxRect = self.god.screenArea.normalizeRectF(pos, endPointF)
        self.waitForInput()

    def waitForInput(self):
        # Position the editor over its bounding rect and take focus.
        self.setGeometry(self._maxRect.toRect())
        # self.setGeometry(self._maxRect.adjusted(0, 0, -1, 0))  # width - 1
        self.setFocus()
        self.show()

    def loadTextInputBy(self, action):
        '''Reload a previously saved text action for re-editing.
        action: (type, color, font, rectf, txt)'''
        self.setTextColor(action[1])
        self.setCurrentFont(action[2])
        self._maxRect = action[3]
        self.append(action[4])
        self.god.isDrawing = True
        self.waitForInput()


class LineWidthAction(QAction):

    '''Pen-width picker: paints a dot of its width in the current color onto
    the shared toolbar icon pixmap.'''

    def __init__(self, text, parent, lineWidth):
        super().__init__(text, parent)
        self._lineWidth = lineWidth
        self.refresh(QtCore.Qt.GlobalColor.red)
        self.triggered.connect(self.onTriggered)
        self.setVisible(False)

    def refresh(self, color):
        # Redraw the icon dot using the toolbar's shared painter/pixmap.
        painter = self.parent().god.screenArea._painter
        dotRadius = QPointF(self._lineWidth, self._lineWidth)
        centerPoint = self.parent().iconPixmapCenter()
        pixmap = self.parent().iconPixmapCopy()
        painter.begin(pixmap)
        painter.setPen(self.parent().god.pen_transparent)
        painter.setBrush(color)
        painter.drawEllipse(QRectF(centerPoint - dotRadius, centerPoint + dotRadius))
        painter.end()
        self.setIcon(QIcon(pixmap))

    def onTriggered(self):
        # Make this width the toolbar's current width.
        self.parent()._curLineWidth = self._lineWidth


class FontAction(QAction):

    '''Font picker for the text tool.'''

    def __init__(self, text, parent):
        super().__init__(text, parent)
        self.setIcon(QIcon(r"img/sys/font.png"))
        self._curFont = self.parent().god.font_textInput
        self.triggered.connect(self.onTriggered)
        self.setVisible(False)

    def onTriggered(self):
        # Ask the user for a font; apply it to the active text editor on OK.
        font, ok = QFontDialog.getFont(self._curFont, self.parent(), caption='选择字体')
        if ok:
            self._curFont = font
            self.parent().god.textInputWg.setCurrentFont(font)


class ColorAction(QAction):

    '''Color picker; propagates the chosen color to the width actions' icons.'''

    def __init__(self, text, parent):
        super().__init__(text, parent)
        self._curColor = QtCore.Qt.GlobalColor.red
        self._pixmap = QPixmap(32, 32)
        self.refresh(self._curColor)
        self.triggered.connect(self.onTriggered)

    def refresh(self, color):
        self._curColor = color
        self._pixmap.fill(color)
        self.setIcon(QIcon(self._pixmap))
        # Keep the line-width dots in sync with the new color.
        self.parent()._at_line_small.refresh(color)
        self.parent()._at_line_normal.refresh(color)
        self.parent()._at_line_big.refresh(color)

    def onTriggered(self):
        col = QColorDialog.getColor(self._curColor, self.parent(), title='选择颜色')
        if col.isValid():
            self.refresh(col)
            
            self.parent().god.textInputWg.setTextColor(col)


class ScreenShotToolBar(QToolBar):
    '''Toolbar shown next to the screenshot area (widths, font, color, shapes,
    undo, exit, save, copy).'''

    def __init__(self, god):
        super().__init__(god)
        self.god = god
        self.setToolButtonStyle(QtCore.Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
        self.setStyleSheet("QToolBar {border-radius: 5px;padding: 3px;background-color: #eeeeef;}")
        self._style_normal = "QToolBar QToolButton{color: black;}"
        # Same look as the mouse-hover style.
        self._style_selected = "QToolBar QToolButton{color: #ff7300;border: 1px solid #BEDAF2;background-color: #D6E4F1}"
        # Shared 32x32 transparent pixmap the LineWidthActions draw their dot on.
        self._iconPixmap = QPixmap(32, 32)
        self._iconPixmap.fill(self.god.color_transparent)
        self._iconPixmapCenter = QPointF(self._iconPixmap.rect().center())
        self._curLineWidth = 3
        self._at_line_small = LineWidthAction('细', self, self._curLineWidth - 2)
        self._at_line_normal = LineWidthAction('中', self, self._curLineWidth)
        self._at_line_big = LineWidthAction('粗', self, self._curLineWidth + 2)
        self._at_font = FontAction('字体', self)
        self._at_color = ColorAction('颜色', self)
        self._at_rectangle = QAction(QIcon(r"img/sys/rectangle.png"), '矩形', self, triggered=self.beforeDrawRectangle)
        self._at_ellipse = QAction(QIcon(r"img/sys/ellipse.png"), '椭圆', self, triggered=self.beforeDrawEllipse)
        self._at_graffiti = QAction(QIcon(r"img/sys/graffiti.png"), '涂鸦', self, triggered=self.beforeDrawGraffiti)
        self._at_textInput = QAction(QIcon(r"img/sys/write.png"), '文字', self, triggered=self.beforeDrawText)
        self.addAction(self._at_line_small)
        self.addAction(self._at_line_normal)
        self.addAction(self._at_line_big)
        self.addAction(self._at_font)
        self.addAction(self._at_color)
        self.addSeparator()
        self.addAction(self._at_rectangle)
        self.addAction(self._at_ellipse)
        self.addAction(self._at_graffiti)
        self.addAction(self._at_textInput)
        self.addAction(QAction(QIcon(r"img/sys/undo.png"), '撤销', self, triggered=self.undo))
        self.addSeparator()
        self.addAction(QAction(QIcon(r"img/sys/logout.png"), '退出', self, triggered=self.god.close))
        self.addAction(QAction(QIcon(r"img/chat/download.png"), '保存', self, triggered=lambda: self.beforeSave('local')))
        self.addAction(QAction(QIcon(r"img/chat/sendImg.png"), '复制', self, triggered=lambda: self.beforeSave('clipboard')))
        self.actionTriggered.connect(self.onActionTriggered)

    def curLineWidth(self):
        return self._curLineWidth

    def curFont(self):
        return self._at_font._curFont

    def curColor(self):
        return self._at_color._curColor
        # return QColor(self._at_color._curColor.toRgb())  # a copy of the color

    def iconPixmapCopy(self):
        return self._iconPixmap.copy()

    def iconPixmapCenter(self):
        return self._iconPixmapCenter

    def onActionTriggered(self, action):
        '''Highlight the currently selected pen width and edit mode.'''
        for at in [self._at_line_small, self._at_line_normal, self._at_line_big]:
            if at._lineWidth == self._curLineWidth:
                self.widgetForAction(at).setStyleSheet(self._style_selected)
            else:
                self.widgetForAction(at).setStyleSheet(self._style_normal)
        if self.god.isDrawRectangle:
            self.widgetForAction(self._at_rectangle).setStyleSheet(self._style_selected)
        else:
            self.widgetForAction(self._at_rectangle).setStyleSheet(self._style_normal)
        if self.god.isDrawEllipse:
            self.widgetForAction(self._at_ellipse).setStyleSheet(self._style_selected)
        else:
            self.widgetForAction(self._at_ellipse).setStyleSheet(self._style_normal)
        if self.god.isDrawGraffiti:
            self.widgetForAction(self._at_graffiti).setStyleSheet(self._style_selected)
        else:
            self.widgetForAction(self._at_graffiti).setStyleSheet(self._style_normal)
        if self.god.isDrawText:
            self.widgetForAction(self._at_textInput).setStyleSheet(self._style_selected)
        else:
            self.widgetForAction(self._at_textInput).setStyleSheet(self._style_normal)

    def setLineWidthActionVisible(self, flag):
        self._at_line_small.setVisible(flag)
        self._at_line_normal.setVisible(flag)
        self._at_line_big.setVisible(flag)

    def beforeDrawRectangle(self):
        # Enter rectangle mode: width actions apply, font does not.
        self.god.clearEditFlags()
        self.god.isDrawRectangle = True
        self.setLineWidthActionVisible(True)
        self._at_font.setVisible(False)

    def beforeDrawEllipse(self):
        self.god.clearEditFlags()
        self.god.isDrawEllipse = True
        self.setLineWidthActionVisible(True)
        self._at_font.setVisible(False)

    def beforeDrawGraffiti(self):
        self.god.clearEditFlags()
        self.god.isDrawGraffiti = True
        self.setLineWidthActionVisible(True)
        self._at_font.setVisible(False)

    def beforeDrawText(self):
        # Text mode: font applies, width actions do not.
        self.god.clearEditFlags()
        self.god.isDrawText = True
        self.setLineWidthActionVisible(False)
        self._at_font.setVisible(True)

    def undo(self):
        '''Undo the latest edit action.'''
        if self.god.screenArea.undoEditAction():
            self.god.update()

    def beforeSave(self, target):
        # If a text edit is in progress, commit it before saving.
        if self.god.isDrawing and self.god.isDrawText:
            self.god.screenArea.saveTextInputAction()
        if target == 'local':
            self.god.save2Local()
        elif target == 'clipboard':
            self.god.save2Clipboard()

    def enterEvent(self, event):
        self.god.setCursor(QtCore.Qt.CursorShape.ArrowCursor)  # standard arrow over the toolbar

    def leaveEvent(self, event):
        self.god.setCursor(QtCore.Qt.CursorShape.CrossCursor)  # plain crosshair elsewhere


class ScreenArea(QtCore.QObject):
    '''Screen-region math core. The screen splits into a 3x3 grid:
    TopLeft,Top,TopRight / Left,Center,Right / BottomLeft,Bottom,BottomRight,
    where Center is defined by the start/end QPointF pair.'''

    def __init__(self, god):
        super().__init__()
        self.god = god
        self._pt_start = QPointF()      # mouse-press point when selecting the area (topLeft)
        self._pt_end = QPointF()        # mouse-release point when selecting (bottomRight)
        self._rt_toolbar = QRectF()     # toolbar rectangle
        self._actions = []              # all edit actions (rect, ellipse, graffiti, text, ...)
        self._pt_startEdit = QPointF()  # press point when drawing a rect/ellipse (topLeft)
        self._pt_endEdit = QPointF()    # release point when drawing a rect/ellipse (bottomRight)
        self._pointfs = []              # every point a graffiti stroke passed through
        self._painter = QPainter()      # painter independent of ScreenShotWidget's own
        self._textOption = QtGui.QTextOption(QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignTop)
        self._textOption.setWrapMode(QtGui.QTextOption.WrapMode.WrapAnywhere)  # 
文本在矩形内自动换行 + # self._textOption.setWrapMode(QtGui.QTextOption.WrapMode.WrapAtWordBoundaryOrAnywhere) + self.captureScreen() + + def captureScreen(self): + '''抓取整个屏幕的截图''' + # screen = QtGui.QGuiApplication.primaryScreen() + self._screenPixmap = QApplication.primaryScreen().grabWindow() + self._pixelRatio = self._screenPixmap.devicePixelRatio() # 设备像素比 + self._rt_screen = self.screenLogicalRectF() + self.remakeNightArea() + + def normalizeRectF(self, topLeftPoint, bottomRightPoint): + '''根据起止点生成宽高非负数的QRectF,通常用于bottomRightPoint比topLeftPoint更左更上的情况 + 入参可以是QPoint或QPointF''' + rectf = QRectF(topLeftPoint, bottomRightPoint) + x = rectf.x() + y = rectf.y() + w = rectf.width() + h = rectf.height() + if w < 0: # bottomRightPoint在topLeftPoint左侧时,topLeftPoint往左移动 + x = x + w + w = -w + if h < 0: # bottomRightPoint在topLeftPoint上侧时,topLeftPoint往上移动 + y = y + h + h = -h + return QRectF(x, y, w, h) + + def physicalRectF(self, rectf): + '''计算划定的截图区域的(缩放倍率1.0的)原始矩形(会变大) + rectf:划定的截图区域的矩形。可为QRect或QRectF''' + return QRectF(rectf.x() * self._pixelRatio, rectf.y() * self._pixelRatio, + rectf.width() * self._pixelRatio, rectf.height() * self._pixelRatio) + + def logicalRectF(self, physicalRectF): + '''根据原始矩形计算缩放后的矩形(会变小) + physicalRectF:缩放倍率1.0的原始矩形。可为QRect或QRectF''' + return QRectF(physicalRectF.x() / self._pixelRatio, physicalRectF.y() / self._pixelRatio, + physicalRectF.width() / self._pixelRatio, physicalRectF.height() / self._pixelRatio) + + def physicalPixmap(self, rectf, editAction=False): + '''根据指定区域获取其原始大小的(缩放倍率1.0的)QPixmap + rectf:指定区域。可为QRect或QRectF + editAction:是否带上编辑结果''' + if editAction: + canvasPixmap = self.screenPhysicalPixmapCopy() + self._painter.begin(canvasPixmap) + self.paintEachEditAction(self._painter, textBorder=False) + self._painter.end() + return canvasPixmap.copy(self.physicalRectF(rectf).toRect()) + else: + return self._screenPixmap.copy(self.physicalRectF(rectf).toRect()) + + def screenPhysicalRectF(self): + return QRectF(self._screenPixmap.rect()) + + 
def screenLogicalRectF(self): + return QRectF(QPointF(0, 0), self.screenLogicalSizeF()) # 即当前屏幕显示的大小 + + def screenPhysicalSizeF(self): + return QSizeF(self._screenPixmap.size()) + + def screenLogicalSizeF(self): + return QSizeF(self._screenPixmap.width() / self._pixelRatio, self._screenPixmap.height() / self._pixelRatio) + + def screenPhysicalPixmapCopy(self): + return self._screenPixmap.copy() + + def screenLogicalPixmapCopy(self): + return self._screenPixmap.scaled(self.screenLogicalSizeF().toSize()) + + def centerPhysicalRectF(self): + return self.physicalRectF(self._rt_center) + + def centerLogicalRectF(self): + '''根据屏幕上的start、end两个QPointF确定''' + return self._rt_center + + def centerPhysicalPixmap(self, editAction=True): + '''截图区域的QPixmap + editAction:是否带上编辑结果''' + return self.physicalPixmap(self._rt_center + QMarginsF(-1, -1, 1, 1), editAction=editAction) + + def centerTopMid(self): + return self._pt_centerTopMid + + def centerBottomMid(self): + return self._pt_centerBottomMid + + def centerLeftMid(self): + return self._pt_centerLeftMid + + def centerRightMid(self): + return self._pt_centerRightMid + + def setStartPoint(self, pointf, remake=False): + self._pt_start = pointf + if remake: + self.remakeNightArea() + + def setEndPoint(self, pointf, remake=False): + self._pt_end = pointf + if remake: + self.remakeNightArea() + + def setCenterArea(self, start, end): + self._pt_start = start + self._pt_end = end + self.remakeNightArea() + + def remakeNightArea(self): + '''重新划分九宫格区域。根据中央截图区域计算出来的其他8个区域、截图区域四个边框中点坐标等都是logical的''' + self._rt_center = self.normalizeRectF(self._pt_start, self._pt_end) + # 中央区域上下左右边框的中点,用于调整大小 + self._pt_centerTopMid = (self._rt_center.topLeft() + self._rt_center.topRight()) / 2 + self._pt_centerBottomMid = (self._rt_center.bottomLeft() + self._rt_center.bottomRight()) / 2 + self._pt_centerLeftMid = (self._rt_center.topLeft() + self._rt_center.bottomLeft()) / 2 + self._pt_centerRightMid = (self._rt_center.topRight() + 
self._rt_center.bottomRight()) / 2 + # 以截图区域左上、上中、右上、左中、右中、左下、下中、右下为中心的正方形区域,用于调整大小 + self._square_topLeft = self.squareAreaByCenter(self._rt_center.topLeft()) + self._square_topRight = self.squareAreaByCenter(self._rt_center.topRight()) + self._square_bottomLeft = self.squareAreaByCenter(self._rt_center.bottomLeft()) + self._square_bottomRight = self.squareAreaByCenter(self._rt_center.bottomRight()) + self._square_topMid = self.squareAreaByCenter(self._pt_centerTopMid) + self._square_bottomMid = self.squareAreaByCenter(self._pt_centerBottomMid) + self._square_leftMid = self.squareAreaByCenter(self._pt_centerLeftMid) + self._square_rightMid = self.squareAreaByCenter(self._pt_centerRightMid) + # 除中央截图区域外的8个区域 + self._rt_topLeft = QRectF(self._rt_screen.topLeft(), self._rt_center.topLeft()) + self._rt_top = QRectF(QPointF(self._rt_center.topLeft().x(), 0), self._rt_center.topRight()) + self._rt_topRight = QRectF(QPointF(self._rt_center.topRight().x(), 0), QPointF(self._rt_screen.width(), self._rt_center.topRight().y())) + self._rt_left = QRectF(QPointF(0, self._rt_center.topLeft().y()), self._rt_center.bottomLeft()) + self._rt_right = QRectF(self._rt_center.topRight(), QPointF(self._rt_screen.width(), self._rt_center.bottomRight().y())) + self._rt_bottomLeft = QRectF(QPointF(0, self._rt_center.bottomLeft().y()), QPointF(self._rt_center.bottomLeft().x(), self._rt_screen.height())) + self._rt_bottom = QRectF(self._rt_center.bottomLeft(), QPointF(self._rt_center.bottomRight().x(), self._rt_screen.height())) + self._rt_bottomRight = QRectF(self._rt_center.bottomRight(), self._rt_screen.bottomRight()) + + def squareAreaByCenter(self, pointf): + '''以QPointF为中心的正方形QRectF''' + rectf = QRectF(0, 0, 15, 15) + rectf.moveCenter(pointf) + return rectf + + def aroundAreaIn8Direction(self): + '''中央区域周边的8个方向的区域(无交集)''' + return [self._rt_topLeft, self._rt_top, self._rt_topRight, + self._rt_left, self._rt_right, + self._rt_bottomLeft, self._rt_bottom, self._rt_bottomRight] + + def 
aroundAreaIn4Direction(self): + '''中央区域周边的4个方向的区域(有交集) + 上区域(左上、上、右上):0, 0, maxX, topRight.y + 下区域(左下、下、右下):0, bottomLeft.y, maxX, maxY-bottomLeft.y + 左区域(左上、左、左下):0, 0, bottomLeft.x, maxY + 右区域(右上、右、右下):topRight.x, 0, maxX - topRight.x, maxY''' + screenSizeF = self.screenLogicalSizeF() + pt_topRight = self._rt_center.topRight() + pt_bottomLeft = self._rt_center.bottomLeft() + return [QRectF(0, 0, screenSizeF.width(), pt_topRight.y()), + QRectF(0, pt_bottomLeft.y(), screenSizeF.width(), screenSizeF.height() - pt_bottomLeft.y()), + QRectF(0, 0, pt_bottomLeft.x(), screenSizeF.height()), + QRectF(pt_topRight.x(), 0, screenSizeF.width() - pt_topRight.x(), screenSizeF.height())] + + def aroundAreaWithoutIntersection(self): + '''中央区域周边的4个方向的区域(无交集) + 上区域(左上、上、右上):0, 0, maxX, topRight.y + 下区域(左下、下、右下):0, bottomLeft.y, maxX, maxY-bottomLeft.y + 左区域(左):0, topRight.y, bottomLeft.x-1, center.height + 右区域(右):topRight.x+1, topRight.y, maxX - topRight.x, center.height''' + screenSizeF = self.screenLogicalSizeF() + pt_topRight = self._rt_center.topRight() + pt_bottomLeft = self._rt_center.bottomLeft() + centerHeight = pt_bottomLeft.y() - pt_topRight.y() + return [QRectF(0, 0, screenSizeF.width(), pt_topRight.y()), + QRectF(0, pt_bottomLeft.y(), screenSizeF.width(), screenSizeF.height() - pt_bottomLeft.y()), + QRectF(0, pt_topRight.y(), pt_bottomLeft.x() - 1, centerHeight), + QRectF(pt_topRight.x() + 1, pt_topRight.y(), screenSizeF.width() - pt_topRight.x(), centerHeight)] + + def setBeginDragPoint(self, pointf): + '''计算开始拖拽位置距离截图区域左上角的向量''' + self._drag_vector = pointf - self._rt_center.topLeft() + + def getNewPosAfterDrag(self, pointf): + '''计算拖拽后截图区域左上角的新位置''' + return pointf - self._drag_vector + + def moveCenterAreaTo(self, pointf): + '''限制拖拽不能超出屏幕范围''' + self._rt_center.moveTo(self.getNewPosAfterDrag(pointf)) + startPointF = self._rt_center.topLeft() + if startPointF.x() < 0: + self._rt_center.moveTo(0, startPointF.y()) + startPointF = self._rt_center.topLeft() + if 
startPointF.y() < 0: + self._rt_center.moveTo(startPointF.x(), 0) + screenSizeF = self.screenLogicalSizeF() + endPointF = self._rt_center.bottomRight() + if endPointF.x() > screenSizeF.width(): + self._rt_center.moveBottomRight(QPointF(screenSizeF.width(), endPointF.y())) + endPointF = self._rt_center.bottomRight() + if endPointF.y() > screenSizeF.height(): + self._rt_center.moveBottomRight(QPointF(endPointF.x(), screenSizeF.height())) + self.setCenterArea(self._rt_center.topLeft(), self._rt_center.bottomRight()) + + def setBeginAdjustPoint(self, pointf): + '''判断开始调整截图区域大小时鼠标左键在哪个区(不可能是中央区域),用于判断调整大小的意图方向''' + self._mousePos = self.getMousePosBy(pointf) + + def getMousePosBy(self, pointf): + if self._square_topLeft.contains(pointf): + return 'TL' + elif self._square_topMid.contains(pointf): + return 'T' + elif self._square_topRight.contains(pointf): + return 'TR' + elif self._square_leftMid.contains(pointf): + return 'L' + elif self._rt_center.contains(pointf): + return 'CENTER' + elif self._square_rightMid.contains(pointf): + return 'R' + elif self._square_bottomLeft.contains(pointf): + return 'BL' + elif self._square_bottomMid.contains(pointf): + return 'B' + elif self._square_bottomRight.contains(pointf): + return 'BR' + else: + return 'ERROR' + + def adjustCenterAreaBy(self, pointf): + '''根据开始调整截图区域大小时鼠标左键在哪个区(不可能是中央区域),判断调整大小的意图方向,判定新的开始、结束位置''' + startPointF = self._rt_center.topLeft() + endPointF = self._rt_center.bottomRight() + if self._mousePos == 'TL': + startPointF = pointf + elif self._mousePos == 'T': + startPointF = QPointF(startPointF.x(), pointf.y()) + elif self._mousePos == 'TR': + startPointF = QPointF(startPointF.x(), pointf.y()) + endPointF = QPointF(pointf.x(), endPointF.y()) + elif self._mousePos == 'L': + startPointF = QPointF(pointf.x(), startPointF.y()) + elif self._mousePos == 'R': + endPointF = QPointF(pointf.x(), endPointF.y()) + elif self._mousePos == 'BL': + startPointF = QPointF(pointf.x(), startPointF.y()) + endPointF = 
QPointF(endPointF.x(), pointf.y()) + elif self._mousePos == 'B': + endPointF = QPointF(endPointF.x(), pointf.y()) + elif self._mousePos == 'BR': + endPointF = pointf + else: # 'ERROR' + return + newRectF = self.normalizeRectF(startPointF, endPointF) + self.setCenterArea(newRectF.topLeft(), newRectF.bottomRight()) + + def getMouseShapeBy(self, pointf): + '''根据鼠标位置返回对应的鼠标样式''' + if self._rt_center.contains(pointf): + if self.god.isDrawRectangle or self.god.isDrawEllipse: + return QtCore.Qt.CursorShape.ArrowCursor + elif self.god.isDrawGraffiti: + return QtCore.Qt.CursorShape.PointingHandCursor # 超链接上的手势 + elif self.god.isDrawText: + return QtCore.Qt.CursorShape.IBeamCursor # 工字 + else: + return QtCore.Qt.CursorShape.SizeAllCursor # 十字有箭头 + # return QtCore.Qt.CursorShape.OpenHandCursor # 打开的手,表示可拖拽 + elif self._square_topLeft.contains(pointf) or self._square_bottomRight.contains(pointf): + return QtCore.Qt.CursorShape.SizeFDiagCursor # ↖↘ + elif self._square_topMid.contains(pointf) or self._square_bottomMid.contains(pointf): + return QtCore.Qt.CursorShape.SizeVerCursor # ↑↓ + elif self._square_topRight.contains(pointf) or self._square_bottomLeft.contains(pointf): + return QtCore.Qt.CursorShape.SizeBDiagCursor # ↙↗ + elif self._square_leftMid.contains(pointf) or self._square_rightMid.contains(pointf): + return QtCore.Qt.CursorShape.SizeHorCursor # ←→ + else: + return QtCore.Qt.CursorShape.CrossCursor # 十字无箭头 + + def isMousePosInCenterRectF(self, pointf): + return self._rt_center.contains(pointf) + + def paintMagnifyingGlassPixmap(self, pos, glassSize): + '''绘制放大镜内的图像(含纵横十字线) + pos:鼠标光标位置 + glassSize:放大镜边框大小''' + pixmapRect = QRect(0, 0, 20, 20) # 以鼠标光标为中心的正方形区域,最好是偶数 + pixmapRect.moveCenter(pos) + glassPixmap = self.physicalPixmap(pixmapRect) + glassPixmap.setDevicePixelRatio(1.0) + glassPixmap = glassPixmap.scaled(glassSize, glassSize, QtCore.Qt.AspectRatioMode.KeepAspectRatio) + # 在放大后的QPixmap上画纵横十字线 + self._painter.begin(glassPixmap) + halfWidth = 
glassPixmap.width() / 2 + halfHeight = glassPixmap.height() / 2 + self._painter.setPen(self.god.pen_SolidLine_lightBlue) + self._painter.drawLine(QPointF(0, halfHeight), QPointF(glassPixmap.width(), halfHeight)) + self._painter.drawLine(QPointF(halfWidth, 0), QPointF(halfWidth, glassPixmap.height())) + self._painter.end() + return glassPixmap + + def paintEachEditAction(self, painter, textBorder=True): + '''绘制所有已保存的编辑行为。编辑行为超出截图区域也无所谓,保存图像时只截取截图区域内 + textBorder:是否绘制文本边框''' + for action in self.getEditActions(): + if action[0] == 'rectangle': # (type, color, lineWidth, startPoint, endPoint) + self.paintRectangle(painter, action[1], action[2], action[3], action[4]) + elif action[0] == 'ellipse': # (type, color, lineWidth, startPoint, endPoint) + self.paintEllipse(painter, action[1], action[2], action[3], action[4]) + elif action[0] == 'graffiti': # (type, color, lineWidth, points) + self.paintGraffiti(painter, action[1], action[2], action[3]) + elif action[0] == 'text': # (type, color, font, rectf, txt) + self.paintTextInput(painter, action[1], action[2], action[3], action[4], textBorder=textBorder) + + def paintRectangle(self, painter, color, lineWidth, startPoint=None, endPoint=None): + if not startPoint: + startPoint = self._pt_startEdit + if not endPoint: + endPoint = self._pt_endEdit + qrectf = self.normalizeRectF(startPoint, endPoint) + if qrectf.isValid(): + pen = QPen(color) + pen.setWidth(lineWidth) + painter.setPen(pen) + painter.setBrush(self.god.color_transparent) + painter.drawRect(qrectf) + + def paintEllipse(self, painter, color, lineWidth, startPoint=None, endPoint=None): + if not startPoint: + startPoint = self._pt_startEdit + if not endPoint: + endPoint = self._pt_endEdit + qrectf = self.normalizeRectF(startPoint, endPoint) + if qrectf.isValid(): + pen = QPen(color) + pen.setWidth(lineWidth) + painter.setPen(pen) + painter.setBrush(self.god.color_transparent) + painter.drawEllipse(qrectf) + + def paintGraffiti(self, painter, color, lineWidth, 
pointfs=None): + if not pointfs: + pointfs = self.getGraffitiPointFs() + pen = QPen(color) + pen.setWidth(lineWidth) + painter.setPen(pen) + total = len(pointfs) + if total == 0: + return + elif total == 1: + painter.drawPoint(pointfs[0]) + else: + previousPoint = pointfs[0] + for i in range(1, total): + nextPoint = pointfs[i] + painter.drawLine(previousPoint, nextPoint) + previousPoint = nextPoint + + def paintTextInput(self, painter, color, font, rectf, txt, textBorder=True): + painter.setPen(color) + painter.setFont(font) + painter.drawText(rectf, txt, self._textOption) + if textBorder: + painter.setPen(QtCore.Qt.PenStyle.DotLine) # 点线 + painter.setBrush(self.god.color_transparent) + painter.drawRect(rectf) + + def getEditActions(self): + return self._actions.copy() + + def takeTextInputActionAt(self, pointf): + '''根据鼠标位置查找已保存的文本输入结果,找到后取出''' + for i in range(len(self._actions)): + action = self._actions[i] + if action[0] == 'text' and action[3].contains(pointf): + return self._actions.pop(i) + return None + + def undoEditAction(self): + reply = False + if self._actions: + reply = self._actions.pop() + if not self._actions: # 所有编辑行为都被撤销后退出编辑模式 + self.god.exitEditMode() + else: + self.god.exitEditMode() + return reply + + def clearEditActions(self): + self._actions.clear() + + def setBeginEditPoint(self, pointf): + '''在截图区域上绘制矩形、椭圆时鼠标左键按下的位置(topLeft)''' + self._pt_startEdit = pointf + self.god.isDrawing = True + + def setEndEditPoint(self, pointf): + '''在截图区域上绘制矩形、椭圆时鼠标左键松开的位置(bottomRight)''' + self._pt_endEdit = pointf + + def saveRectangleAction(self): + rectf = self.normalizeRectF(self._pt_startEdit, self._pt_endEdit) + self._actions.append(('rectangle', self.god.toolbar.curColor(), self.god.toolbar.curLineWidth(), + rectf.topLeft(), rectf.bottomRight())) + self._pt_startEdit = QPointF() + self._pt_endEdit = QPointF() + self.god.isDrawing = False + + def saveEllipseleAction(self): + rectf = self.normalizeRectF(self._pt_startEdit, self._pt_endEdit) + 
self._actions.append(('ellipse', self.god.toolbar.curColor(), self.god.toolbar.curLineWidth(), + rectf.topLeft(), rectf.bottomRight())) + self._pt_startEdit = QPointF() + self._pt_endEdit = QPointF() + self.god.isDrawing = False + + def saveGraffitiPointF(self, pointf, first=False): + self._pointfs.append(pointf) + if first: + self.god.isDrawing = True + + def getGraffitiPointFs(self): + return self._pointfs.copy() + + def saveGraffitiAction(self): + if self._pointfs: + self._actions.append(('graffiti', self.god.toolbar.curColor(), self.god.toolbar.curLineWidth(), self._pointfs.copy())) + self._pointfs.clear() + self.god.isDrawing = False + + def setBeginInputTextPoint(self, pointf): + '''在截图区域上输入文字时鼠标左键按下的位置(topLeft)''' + self.god.isDrawing = True + self.god.textInputWg.beginNewInput(pointf, self._pt_end) + + def saveTextInputAction(self): + txt = self.god.textInputWg.toPlainText() + if txt: + rectf = self.god.textInputWg._maxRect # 取最大矩形的topLeft + rectf.setSize(QRectF(self.god.textInputWg.rect()).size()) # 取实际矩形的宽高 + self._actions.append(('text', self.god.toolbar.curColor(), self.god.toolbar.curFont(), + rectf, txt)) + self.god.textInputWg.clear() + self.god.textInputWg.hide() # 不管保存成功与否都取消编辑 + self.god.isDrawing = False + + def saveNightAreaImg(self): + '''将九宫格区域保存为本地图片,仅用于开发测试''' + screenPixmap = self.screenPhysicalPixmapCopy() + self._painter.begin(screenPixmap) + self._painter.setPen(self.pen_SolidLine_lightBlue) + self._painter.setFont(self.god.font_normal) + self._painter.drawRect(self._rt_center) + for area in self.aroundAreaIn8Direction(): + self._painter.drawRect(area) + for pointf in [self._rt_center.topLeft(), self._rt_center.topRight(), + self._rt_center.bottomLeft(), self._rt_center.bottomRight(), + self._pt_centerTopMid, self._pt_centerBottomMid, + self._pt_centerLeftMid, self._pt_centerRightMid]: + self._painter.drawText(pointf + QPointF(5, -5), '(%s, %s)' % (pointf.x(), pointf.y())) + self._painter.end() + screenPixmap.save('1.jpg', quality=100) + 
self.centerPhysicalPixmap().save('2.jpg', quality=100) + + +class ScreenShotWidget(QWidget): + + fileType_all = '所有文件 (*);;Excel文件 (*.xls *.xlsx);;图片文件 (*.jpg *.jpeg *.gif *.png *.bmp)' + fileType_img = '图片文件 (*.jpg *.jpeg *.gif *.png *.bmp)' + dir_lastAccess = os.getcwd() # 最后访问目录 + + def __init__(self): + super().__init__() + self.setMouseTracking(True) + self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint | QtCore.Qt.WindowType.WindowStaysOnTopHint) + self.initPainterTool() + self.initFunctionalFlag() + self.screenArea = ScreenArea(self) + self.toolbar = ScreenShotToolBar(self) + self.textInputWg = TextInputWidget(self) + # 设置 screenPixmap 为窗口背景 + # palette = QtGui.QPalette() + # palette.setBrush(QtGui.QPalette.ColorRole.Window, QtGui.QBrush(self.screenArea.screenPhysicalPixmapCopy())) + # self.setPalette(palette) + + def start(self): + self.screenArea.captureScreen() + self.setGeometry(self.screenArea.screenPhysicalRectF().toRect()) + self.clearScreenShotArea() + self.showFullScreen() + + def initPainterTool(self): + self.painter = QPainter() + self.color_transparent = QtCore.Qt.GlobalColor.transparent + self.color_black = QColor(0, 0, 0, 64) # 黑色背景 + self.color_lightBlue = QColor(30, 120, 255) # 浅蓝色。深蓝色QtCore.Qt.GlobalColor.blue + self.font_normal = QtGui.QFont('Times New Roman', 11, QtGui.QFont.Weight.Normal) + self.font_textInput = QtGui.QFont('微软雅黑', 16, QtGui.QFont.Weight.Normal) # 工具条文字工具默认字体 + self.pen_transparent = QPen(QtCore.Qt.PenStyle.NoPen) # 没有笔迹,画不出线条 + self.pen_white = QPen(QtCore.Qt.GlobalColor.white) + self.pen_SolidLine_lightBlue = QPen(self.color_lightBlue) # 实线,浅蓝色 + self.pen_SolidLine_lightBlue.setStyle(QtCore.Qt.PenStyle.DashLine) # 实线SolidLine,虚线DashLine,点线DotLine + self.pen_SolidLine_lightBlue.setWidthF(0) # 0表示线宽为1 + self.pen_DashLine_lightBlue = QPen(self.color_lightBlue) # 虚线,浅蓝色 + self.pen_DashLine_lightBlue.setStyle(QtCore.Qt.PenStyle.DashLine) + + def initFunctionalFlag(self): + self.hasScreenShot = False # 
是否已通过拖动鼠标左键划定截图区域 + self.isCapturing = False # 正在拖动鼠标左键选定截图区域时 + self.isMoving = False # 在截图区域内拖动时 + self.isAdjusting = False # 在截图区域的边框按住鼠标左键调整大小时 + self.isDrawing = False # 是否已在截图区域内开始绘制 + self.isDrawRectangle = False # 正在截图区域内画矩形 + self.isDrawEllipse = False # 正在截图区域内画椭圆 + self.isDrawGraffiti = False # 正在截图区域内进行涂鸦 + self.isDrawText = False # 正在截图区域内画文字 + self.setCursor(QtCore.Qt.CursorShape.CrossCursor) # 设置鼠标样式 十字 + + def paintEvent(self, event): + centerRectF = self.screenArea.centerLogicalRectF() + screenSizeF = self.screenArea.screenLogicalSizeF() + canvasPixmap = self.screenArea.screenPhysicalPixmapCopy() + # canvasPixmap = QPixmap(screenSizeF.toSize()) + # canvasPixmap.fill(self.color_transparent) + # 在屏幕截图的副本上绘制已选定的截图区域 + self.painter.begin(canvasPixmap) + if self.hasScreenShot: + self.paintCenterArea(centerRectF) # 绘制中央截图区域 + self.paintMaskLayer(screenSizeF, fullScreen=False) # 绘制截图区域的周边区域遮罩层 + else: + self.paintMaskLayer(screenSizeF) + self.paintMagnifyingGlass(screenSizeF) # 在鼠标光标右下角显示放大镜 + self.paintToolbar(centerRectF, screenSizeF) # 在截图区域右下角显示工具条 + self.paintEditActions() # 在截图区域绘制编辑行为结果 + self.painter.end() + # 把画好的绘制结果显示到窗口上 + self.painter.begin(self) + self.painter.drawPixmap(0, 0, canvasPixmap) # 从坐标(0, 0)开始绘制 + self.painter.end() + + def paintCenterArea(self, centerRectF): + '''绘制已选定的截图区域''' + self.painter.setRenderHint(QPainter.RenderHint.Antialiasing, True) # 反走样 + # 1.绘制矩形线框 + self.painter.setPen(self.pen_DashLine_lightBlue) + self.painter.drawRect(centerRectF) + # 2.绘制矩形线框4个端点和4条边框的中间点 + if centerRectF.width() >= 100 and centerRectF.height() >= 100: + points = [ # 点坐标 + centerRectF.topLeft(), centerRectF.topRight(), centerRectF.bottomLeft(), centerRectF.bottomRight(), + self.screenArea.centerLeftMid(), self.screenArea.centerRightMid(), + self.screenArea.centerTopMid(), self.screenArea.centerBottomMid() + ] + blueDotRadius = QPointF(2, 2) # 椭圆蓝点 + self.painter.setBrush(self.color_lightBlue) + for point in points: + 
    def paintMaskLayer(self, screenSizeF, fullScreen=True):
        """Darken the screen: either everything (fullScreen=True) or only the
        regions around the selected capture area, to make the selection stand out."""
        if fullScreen:  # full-screen mask
            maskPixmap = QPixmap(screenSizeF.toSize())
            maskPixmap.fill(self.color_black)
            self.painter.drawPixmap(0, 0, maskPixmap)
        else:  # mask only the regions around the capture area
            # Method 1: the 8 regions around the capture area.
            # for area in self.screenArea.aroundAreaIn8Direction():
            #     area = area.normalized()
            #     maskPixmap = QPixmap(area.size().toSize())  # float->int rounding may leave hairline gaps
            #     maskPixmap.fill(self.color_black)
            #     self.painter.drawPixmap(area.topLeft(), maskPixmap)
            # Method 2: top/bottom/left/right regions (overlapping; intersections get darker stripes).
            # for area in self.screenArea.aroundAreaIn4Direction():
            #     maskPixmap = QPixmap(area.size().toSize())
            #     maskPixmap.fill(self.color_black)
            #     self.painter.drawPixmap(area.topLeft(), maskPixmap)
            # Method 3: top/bottom/left/right regions without any intersection.
            for area in self.screenArea.aroundAreaWithoutIntersection():
                maskPixmap = QPixmap(area.size().toSize())
                maskPixmap.fill(self.color_black)
                self.painter.drawPixmap(area.topLeft(), maskPixmap)

    def paintMagnifyingGlass(self, screenSizeF, glassSize=150, offset=30, labelHeight=30):
        """Show a magnifier at the cursor's lower right while no area is selected,
        while selecting, or while resizing.

        glassSize: side length of the square magnifier
        offset: minimum distance between any magnifier corner and the cursor
        labelHeight: height of the two text lines (pos and rgb)
        """
        if self.hasScreenShot and (not self.isCapturing) and (not self.isAdjusting):
            return
        pos = QtGui.QCursor.pos()
        glassPixmap = self.screenArea.paintMagnifyingGlassPixmap(pos, glassSize)  # magnifier pixmap with crosshair already drawn
        # Clamp the magnifier so it never leaves the screen.
        glassRect = glassPixmap.rect()
        if (pos.x() + glassSize + offset) < screenSizeF.width():
            if (pos.y() + offset + glassSize + labelHeight) < screenSizeF.height():
                glassRect.moveTo(pos + QPoint(offset, offset))
            else:
                glassRect.moveBottomLeft(pos + QPoint(offset, -offset))
        else:
            if (pos.y() + offset + glassSize + labelHeight) < screenSizeF.height():
                glassRect.moveTopRight(pos + QPoint(-offset, offset))
            else:
                glassRect.moveBottomRight(pos + QPoint(-offset, -offset))
        self.painter.drawPixmap(glassRect.topLeft(), glassPixmap)
        # Show "pos:(x, y)" and "rgb:(r, g, b)" for the pixel under the crosshair.
        qrgb = QtGui.QRgba64.fromArgb32(glassPixmap.toImage().pixel(glassPixmap.rect().center()))
        labelRectF = QRectF(glassRect.bottomLeft().x(), glassRect.bottomLeft().y(), glassSize, labelHeight)
        self.painter.setPen(self.pen_transparent)
        self.painter.setBrush(self.color_black)  # black background
        self.painter.drawRect(labelRectF)
        self.painter.setPen(self.pen_white)
        self.painter.setFont(self.font_normal)
        self.painter.drawText(labelRectF,
                              QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter,
                              'pos:(%s, %s)\nrgb:(%s, %s, %s)' % (pos.x(), pos.y(), qrgb.red8(), qrgb.green8(), qrgb.blue8()))
self.screenArea.paintMagnifyingGlassPixmap(pos, glassSize) # 画好纵横十字线后的放大镜内QPixmap + # 限制放大镜显示不超出屏幕外 + glassRect = glassPixmap.rect() + if (pos.x() + glassSize + offset) < screenSizeF.width(): + if (pos.y() + offset + glassSize + labelHeight) < screenSizeF.height(): + glassRect.moveTo(pos + QPoint(offset, offset)) + else: + glassRect.moveBottomLeft(pos + QPoint(offset, -offset)) + else: + if (pos.y() + offset + glassSize + labelHeight) < screenSizeF.height(): + glassRect.moveTopRight(pos + QPoint(-offset, offset)) + else: + glassRect.moveBottomRight(pos + QPoint(-offset, -offset)) + self.painter.drawPixmap(glassRect.topLeft(), glassPixmap) + # 显示pos:(x, y)、rgb:(255,255,255) + qrgb = QtGui.QRgba64.fromArgb32(glassPixmap.toImage().pixel(glassPixmap.rect().center())) + labelRectF = QRectF(glassRect.bottomLeft().x(), glassRect.bottomLeft().y(), glassSize, labelHeight) + self.painter.setPen(self.pen_transparent) + self.painter.setBrush(self.color_black) # 黑底 + self.painter.drawRect(labelRectF) + self.painter.setPen(self.pen_white) + self.painter.setFont(self.font_normal) + self.painter.drawText(labelRectF, + QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter, + 'pos:(%s, %s)\nrgb:(%s, %s, %s)' % (pos.x(), pos.y(), qrgb.red8(), qrgb.green8(), qrgb.blue8())) + + def paintToolbar(self, centerRectF, screenSizeF): + '''在截图区域右下角显示工具条''' + if self.hasScreenShot: + if self.isCapturing or self.isAdjusting: + self.toolbar.hide() # 正在划定截取区域时、调整截图区域大小时不显示工具条 + else: + self.toolbar.adjustSize() + toolbarRectF = QRectF(self.toolbar.rect()) + # 工具条位置优先顺序:右下角下侧,右上角上侧,右下角上侧 + if (screenSizeF.height() - centerRectF.bottomRight().y()) > toolbarRectF.height(): + toolbarRectF.moveTopRight(centerRectF.bottomRight() + QPointF(-5, 5)) + elif centerRectF.topRight().y() > toolbarRectF.height(): + toolbarRectF.moveBottomRight(centerRectF.topRight() + QPointF(-5, -5)) + else: + toolbarRectF.moveBottomRight(centerRectF.bottomRight() + QPointF(-5, -5)) + # 限制工具条的x坐标不为负数,不能移出屏幕外 
    def paintEditActions(self):
        '''Draw edit results in the capture area. Edits may spill outside the
        area; that is harmless because saving crops to the capture area only.'''
        # 1. The rectangle/ellipse/graffiti currently being dragged.
        if self.isDrawRectangle:
            self.screenArea.paintRectangle(self.painter, self.toolbar.curColor(), self.toolbar.curLineWidth())
        elif self.isDrawEllipse:
            self.screenArea.paintEllipse(self.painter, self.toolbar.curColor(), self.toolbar.curLineWidth())
        elif self.isDrawGraffiti:
            self.screenArea.paintGraffiti(self.painter, self.toolbar.curColor(), self.toolbar.curLineWidth())
        # 2. All previously saved edit actions.
        self.screenArea.paintEachEditAction(self.painter)

    def clearEditFlags(self):
        """Clear every per-tool editing flag."""
        self.isDrawing = False
        self.isDrawRectangle = False
        self.isDrawEllipse = False
        self.isDrawGraffiti = False
        self.isDrawText = False

    def exitEditMode(self):
        '''Leave edit mode.'''
        self.clearEditFlags()
        self.toolbar.onActionTriggered(None)  # clear the toolbar's checked tool state
        self.textInputWg.hide()

    def clearScreenShotArea(self):
        '''Clear the selected capture area.'''
        self.screenArea.clearEditActions()  # discard saved edit actions
        self.exitEditMode()
        self.hasScreenShot = False
        self.isCapturing = False
        pos = QPointF()
        self.screenArea.setCenterArea(pos, pos)
        self.update()
        self.setCursor(QtCore.Qt.CursorShape.CrossCursor)  # crosshair cursor

    def mousePressEvent(self, event):
        """Left button: dispatch to edit / move / resize / select mode depending
        on the current flags. Right button: clear the selection, or exit."""
        if event.button() == QtCore.Qt.MouseButton.LeftButton:
            pos = event.position()
            if self.hasScreenShot:
                if self.isDrawRectangle or self.isDrawEllipse:
                    self.screenArea.setBeginEditPoint(pos)
                elif self.isDrawGraffiti:  # record every point the graffiti passes through
                    self.screenArea.saveGraffitiPointF(pos, first=True)
                elif self.isDrawText:
                    if self.isDrawing:
                        if QRectF(self.textInputWg.rect()).contains(pos):
                            pass  # adjusting the caret inside the input box; ignore
                        else:  # clicked outside the input box: finish editing
                            self.screenArea.saveTextInputAction()
                    else:  # not editing yet (text dragging is not supported)
                        action = self.screenArea.takeTextInputActionAt(pos)
                        if action:  # clicked inside an existing text item: edit it
                            self.textInputWg.loadTextInputBy(action)
                        else:  # clicked elsewhere: start a new text input
                            self.screenArea.setBeginInputTextPoint(pos)
                elif self.screenArea.isMousePosInCenterRectF(pos):
                    self.isMoving = True  # enter drag-move mode
                    self.screenArea.setBeginDragPoint(pos)
                else:
                    self.isAdjusting = True  # enter resize mode
                    self.screenArea.setBeginAdjustPoint(pos)
            else:
                self.screenArea.setCenterArea(pos, pos)
                self.isCapturing = True  # enter area-selection mode
        if event.button() == QtCore.Qt.MouseButton.RightButton:
            if self.hasScreenShot or self.isCapturing:  # clear the selected capture area
                self.clearScreenShotArea()
            else:
                self.close()
    def mouseReleaseEvent(self, event):
        """On left-button release: commit any in-progress shape edit and leave
        the capture/move/resize modes."""
        if event.button() == QtCore.Qt.MouseButton.LeftButton:
            if self.isDrawRectangle:
                self.screenArea.saveRectangleAction()
            elif self.isDrawEllipse:
                # NOTE(review): "saveEllipseleAction" looks misspelled, but the method
                # is defined on ScreenArea elsewhere — rename both together if fixed.
                self.screenArea.saveEllipseleAction()
            elif self.isDrawGraffiti:
                self.screenArea.saveGraffitiAction()
            self.isCapturing = False
            self.isMoving = False
            self.isAdjusting = False
            self.toolbar.show()

    def mouseMoveEvent(self, event):
        """Route mouse movement to the active mode (edit, select, move, resize)
        and update the cursor shape."""
        pos = event.position()
        if self.isDrawing:
            if self.isDrawRectangle or self.isDrawEllipse:
                self.screenArea.setEndEditPoint(pos)
            elif self.isDrawGraffiti:
                self.screenArea.saveGraffitiPointF(pos)
        elif self.isCapturing:
            self.hasScreenShot = True
            self.screenArea.setEndPoint(pos, remake=True)
        elif self.isMoving:
            self.screenArea.moveCenterAreaTo(pos)
        elif self.isAdjusting:
            self.screenArea.adjustCenterAreaBy(pos)
        self.update()
        if self.hasScreenShot:
            self.setCursor(self.screenArea.getMouseShapeBy(pos))
        else:
            self.setCursor(QtCore.Qt.CursorShape.CrossCursor)  # crosshair cursor

    def mouseDoubleClickEvent(self, event):
        """Double left-click inside the capture area: copy to clipboard and exit."""
        if event.button() == QtCore.Qt.MouseButton.LeftButton:
            if self.screenArea.isMousePosInCenterRectF(event.position()):
                self.save2Clipboard()
                self.close()

    def keyPressEvent(self, QKeyEvent):
        """Esc cancels; Return/Enter (main keyboard or keypad) confirms to clipboard."""
        if QKeyEvent.key() == QtCore.Qt.Key.Key_Escape:
            self.close()
        if QKeyEvent.key() in (QtCore.Qt.Key.Key_Return, QtCore.Qt.Key.Key_Enter):
            self.save2Clipboard()
            self.close()
    def save2Clipboard(self):
        '''Copy the captured area to the clipboard, then close the overlay.'''
        if self.hasScreenShot:
            mimData = QtCore.QMimeData()
            mimData.setImageData(self.screenArea.centerPhysicalPixmap().toImage())
            QApplication.clipboard().setMimeData(mimData)
            # self.screenArea.saveNightAreaImg()
            self.close()

    def save2Local(self):
        """Ask the user for a path, save the captured area there, then close."""
        fileType = self.fileType_img
        filePath, fileFormat = self.sys_selectSaveFilePath(self, fileType=fileType)
        if filePath:
            self.screenArea.centerPhysicalPixmap().save(filePath, quality=100)
        self.close()

    def sys_getCurTime(self, fmt='%Y-%m-%d %H:%M:%S'):
        '''Return the current time formatted as a string.'''
        # return QtCore.QDateTime.currentDateTime().toString('yyyy-MM-dd hh:mm:ss')
        return datetime.now().strftime(fmt)

    def sys_selectSaveFilePath(self, widget, title='选择文件保存路径', saveFileDir=None,
                               saveFileName='', defaultFileFmt='%Y%m%d%H%M%S', fileType=None):
        '''Let the user choose a file save path.

        title: dialog window title
        saveFileDir: directory to open in (defaults to the last accessed one)
        saveFileName: default file name; when empty, the current time formatted
            with defaultFileFmt is used instead
        fileType: selectable file-type filter (defaults to fileType_all)
        return: (chosen save path, chosen file-type filter)
        '''
        options = QFileDialog.Option.ReadOnly
        if saveFileName == '':
            saveFileName = self.sys_getCurTime(defaultFileFmt)
        if not saveFileDir:
            saveFileDir = self.dir_lastAccess
        saveFilePath = os.path.join(saveFileDir, saveFileName)
        if not fileType:
            fileType = self.fileType_all
        filePath, fileFormat = QFileDialog.getSaveFileName(widget, title, saveFilePath, fileType, options=options)
        if filePath:
            # NOTE(review): this assigns an instance attribute shadowing the
            # class-level dir_lastAccess, so the "last directory" would not be
            # shared across instances — confirm whether class-wide memory was intended.
            self.dir_lastAccess = os.path.dirname(filePath)
        return (filePath, fileFormat)


class MainWindow(QMainWindow):
    """Demo window with buttons that launch the screenshot overlay."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle('自定义截图工具展示')
        self.screenShotWg = ScreenShotWidget()
        centralLayout = QVBoxLayout()
        centralLayout.addWidget(QPushButton('开始截图', clicked=lambda: self.screenShot()))
        centralLayout.addWidget(QPushButton('隐藏本窗口后截图', clicked=lambda: self.screenShot(True)))
        centralWidget = QWidget()
        centralWidget.setLayout(centralLayout)
        self.setCentralWidget(centralWidget)
centralLayout.addWidget(QPushButton('隐藏本窗口后截图', clicked=lambda: self.screenShot(True))) + centralWidget = QWidget() + centralWidget.setLayout(centralLayout) + self.setCentralWidget(centralWidget) + + def screenShot(self, hide=False): + if hide: + self.showMinimized() + self.screenShotWg.start() + + +if __name__ == '__main__': + app = QApplication(sys.argv) + translator = QtCore.QTranslator() # 颜色选取窗口、字体选择窗口中文化 + if translator.load("D:/dev/Python38/Lib/site-packages/PyQt6/Qt6/translations/qtbase_zh_CN.qm"): + app.installTranslator(translator) + main = MainWindow() + main.show() + sys.exit(app.exec()) + diff --git a/lesson1/链接.py b/lesson1/链接.py new file mode 100644 index 0000000..f241b6b --- /dev/null +++ b/lesson1/链接.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +create_action.triggered.connect(functools.partial(self.create_action_func, i))