PyQt/lesson1/cv2流播放.py
2024-10-04 13:00:37 +08:00


# -*- coding: utf-8 -*-
import time
import vlc
import cv2
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
from multiprocessing import Process, Manager, freeze_support
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
def process_frame(info_dict, frame_dict):
List = []
import os
for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')):
# if s > 3:
# break
List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i))
f = 0
frams = 0
for s, n in enumerate(List):
if frams != 0:
print('dsds')
for j in range(f, frams):
print(j ,'lllllll')
frame_dict[j] = None
cap = cv2.VideoCapture(n)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frams = frameNum
info_dict[s] = frameNum
for i in range(frameNum):
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frame_dict[f] = frame
f += 1
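# --- Hedged sketch (not part of the original script) --------------------------------
# The pattern above: a child process decodes frames into a Manager dict keyed by frame
# index while the Qt side polls that dict. A minimal, self-contained version of the same
# idea; 'demo.mov' is a placeholder path, not a file from this project.
def _demo_fill_cache(frame_dict, path='demo.mov', limit=48):
    cap = cv2.VideoCapture(path)
    f = 0
    while f < limit:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes to BGR; swap to RGB so QImage.Format_RGB888 shows true colours
        frame_dict[f] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        f += 1
    cap.release()

def _demo_read_cache():
    frames = Manager().dict()
    p = Process(target=_demo_fill_cache, args=(frames,))
    p.start()
    p.join()
    print('cached frame indices:', sorted(frames.keys()))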
class Window(QLabel):
def __init__(self):
super(Window, self).__init__()
self.cacheInfo = {}
self.setAlignment(Qt.AlignCenter)
# self.setFixedSize(1280, 720)
self.cap = cv2.VideoCapture(r"C:\Users\ARTIST\Pictures\f\render_d20_050_V001.mov")
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret, frame = self.cap.read()
if ret:
self.capheight, self.capwidth, bytesPerComponent = frame.shape
self.capbutedperline = 3 * self.capwidth
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
qimg = QImage(
frame.data,
self.capwidth,
self.capheight,
self.capbutedperline,
QImage.Format.Format_RGB888,
)
pixmap = QPixmap.fromImage(qimg).scaled(1280, 720)
self.setPixmap(pixmap)
self.get()
self.cacheThread = CacheThread()
# self.cacheThread.cachedone.connect(self.cache_done)
# self.cacheThread.start()
# self.cacheFrames = {}
self.timer = QTimer()
self.timer.setInterval(1000 // 24) # set the timer interval so it fires 24 times per second
self.timer.timeout.connect(self.generate_number) # connect the timer's timeout signal to the generate_number slot
self.counter = 0
# time.sleep(3)
manager = Manager()
self.cacheFrames = manager.dict()
p = Process(target=process_frame, args=(self.cacheInfo, self.cacheFrames,))
p.start()
self.timer.start()
self.sss = Get(self.cacheFrames, self.counter)
self.sss.start()
# @pyqtSlot()
# def generate_number(self):
# # print(self.cacheFrames)
# if self.counter in self.cacheFrames:
# qimg = QImage(
# cv2.imdecode(np.frombuffer(self.cacheFrames.get(self.counter), np.uint8), cv2.IMREAD_COLOR),
# self.capwidth,
# self.capheight,
# self.capbutedperline,
# QImage.Format.Format_RGB888,
# )
# pixmap = QPixmap.fromImage(qimg).scaled(1280, 720)
# self.setPixmap(pixmap)
# self.counter += 1
@pyqtSlot()
def generate_number(self):
if self.counter in self.cacheFrames:
frame = self.cacheFrames[self.counter]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
self.setPixmap(pixmap)
self.counter += 1
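# Note (added): QPixmap.scaled() defaults to Qt.IgnoreAspectRatio, so the frame is
# stretched to fill the label; pass Qt.KeepAspectRatio as a third argument to keep the
# original proportions.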
def cache_done(self, frame, data):
print('cache done', data.data)
# self.cacheFrames = self.cacheThread.cacheFrames
# self.cacheFrames[frame] = data
# self.capheight = self.cacheThread.capheight
# self.capwidth = self.cacheThread.capwidth
# self.capbutedperline = self.cacheThread.capbutedperline
# self.timer.start()
def get(self):
List = []
import os
for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')):
# if s > 3:
# break
List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i))
f = 0
for s, n in enumerate(List):
cap = cv2.VideoCapture(n)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.cacheInfo[s] = frameNum
print(self.cacheInfo)
def mousePressEvent(self, ev):
x = ev.pos().x()
i = int(float(x)/self.width() * sum([self.cacheInfo.get(i) for i in self.cacheInfo.keys()]))
if i in self.cacheFrames:
self.counter = i
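# Hedged helper (not in the original): mousePressEvent above maps the click position to
# a global frame index with x / width * total_frames. The same arithmetic as a pure
# function:
def x_to_frame(x, widget_width, total_frames):
    """Map a pixel position on the scrub surface to a frame index."""
    return int(float(x) / widget_width * total_frames)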
class Get(QThread):
num = pyqtSignal(int)
def __init__(self, info=None, counter=None):
super(Get, self).__init__()
self.info = info
self.counter = counter
def run(self) -> None:
i = 0
while True:
# try:
for s in range(self.counter-10):
print('y', s)
if s < self.counter-20:
continue
# try:
self.info[s] = None
# except Exception as e:
# print('e', e)
# except Exception as a:
# print('a', a)
if i in self.info:
print(len(self.info))
self.num.emit(len(self.info))
class CacheThread(QThread):
cachedone = pyqtSignal(int, object)
def __init__(self):
super(CacheThread, self).__init__()
self.cacheFrames = {}
def run(self) -> None:
List = []
import os
for s, i in enumerate(os.listdir(r'T:\proj\LFC\FIND_MOV\b20')):
if s > 3:
break
List.append(os.path.join(r'T:\proj\LFC\FIND_MOV\b20', i))
f = 0
for n in List:
self.cap = cv2.VideoCapture(n)
self.frameNum = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
for i in range(self.frameNum):
ret, frame = self.cap.read()
if ret:
frame = cv2.resize(frame, (1280, 720))
self.capheight, self.capwidth, bytesPerComponent = frame.shape
self.capbutedperline = 3 * self.capwidth
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.cacheFrames[f+1] = frame
self.cachedone.emit(f+1, frame)
f += 1
print(f)
# self.cachedone.emit()
self.cap.release()
if __name__ == '__main__':
freeze_support()
app = QApplication(sys.argv)
widget = Window()
widget.show()
app.exec_()
# ++++++++++++++++++++++++++++++++++++++++++
import random
import threading
import time
import cv2
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
from multiprocessing import Process, Manager, Value, freeze_support
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
import pyaudio
from moviepy.editor import VideoFileClip
def get(frame, data):
for key, value in data.items():
if frame in value[1]:
return key, value[0]
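# Note (added): data here is the playInit dict built in Window.get() below, e.g.
# {0: [[1, 100], [1, 101]], 1: [[1, 80], [102, 182]]} for two clips of 100 and 80
# frames. value[1] holds only the two endpoints of the clip's global range, so
# `frame in value[1]` matches just those endpoints; the revision further down widens
# the test to range(value[1][0], value[1][1] + 1) to cover the whole clip.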
def play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0):
while True:
if path:
video = VideoFileClip(path)
cap = cv2.VideoCapture(path)
fps = cap.get(cv2.CAP_PROP_FPS)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb")
fps = cap.get(cv2.CAP_PROP_FPS)
audio = video.audio
if not audio:
au_if.value = 0
continue
p = pyaudio.PyAudio()
rat = audio.fps
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=rat,
output=True)
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio frame by frame
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
fr = int(rat/fps)
if (i % fr) == 0:
if i == 0:
print('event', i)
event.value = 1
time.sleep(1.5)
else:
time_.value = i
frame.value += 1
if (i % rat) == 0:
print('miao', s)
s += 1
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
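# Hedged sketch (not in the original): play_audioss advances the shared frame counter
# once every rat / fps audio samples. With the common 44100 Hz audio and 25 fps video
# that is 44100 / 25 = 1764 samples per video frame, which is also the magic constant
# used in play_audios further down.
def samples_per_video_frame(audio_rate, video_fps):
    """Number of audio samples that correspond to one video frame."""
    return int(audio_rate / video_fps)

assert samples_per_video_frame(44100, 25) == 1764
# Note: ndarray.tostring() used in these audio loops is deprecated in recent NumPy;
# tobytes() is the drop-in replacement.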
class T(QThread):
frame = pyqtSignal()
def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None):
super().__init__()
self.cache = cache
self.n_playframe = n_playframe
self.frames1 = frames1
self.playframe = playframe
self.time = time
def run(self):
while True:
# print('bububuubub')
if self.playframe.value in self.cache.keys():
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
else:
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
# self.time.value = self.playframe.value
# for i in range(2):
# pass
# # print(1111, self.n_playframe.value, self.frames1.value)
# # self.frame.emit()
# self.n_playframe.value = self.frames1.value
class PlayAudio(QThread):
frame = pyqtSignal()
def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0):
super().__init__()
self.cache = cache
self.playlist = playlist
self.playframe = playframe
self.n_playframe = n_playframe
self.time = time
self.select = select
self.test = test
self.start_time = start_time
self.timer = QTimer()
cap = cv2.VideoCapture(self.playlist[self.select.value])
self.fps = cap.get(cv2.CAP_PROP_FPS)
print(self.fps)
self.timer.setInterval(int(1000 // self.fps)) # setInterval() needs an int; cap.get() returns a float fps
self.timer.timeout.connect(self.Es)
# self.timer.start()
self.audio = None
self.stream = None
self.buffer = []
self.sss = False
self.au_event = Manager().Value("i", 0)
self.frames = Manager().Value("i", 0)
self.frames1 = Manager().Value("i", 0)
self.frames2 = 0
self.au_time = Manager().Value("i", 0)
self.if_au = Manager().Value("i", 1)
self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au))
self.p.start()
self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, self.cache)
self.t.frame.connect(self.Es)
while True:
if self.au_event.value:
# self.timer.start()
self.frame.emit()
self.playframe.value += 1
self.t.start()
self.au_event.value = 0
break
elif self.if_au.value == 0:
self.timer.start()
break
def set(self, fps):
# self.timer.stop()
self.timer.setInterval(1000 // int(fps))
# self.timer.start()
# def run(self):
# index = None
# p = None
# if self.sss:
# return
# # while True:
# # # print(self.select.value)
# # # print(self.playlist[self.select.value])
# # if index != self.select.value:
# # if self.stream:
# # self.stream.close()
# # self.buffer = []
# # self.audio = None
# # self.stream = None
# # # if p:
# # # print('clor')
# # # p.terminate()
# #
# # index = self.select.value
# # video = VideoFileClip(self.playlist[self.select.value])
# # self.audio = video.audio
# # if self.audio:
# # rat = self.audio.fps
# #
# # p = pyaudio.PyAudio()
# # create the output stream
# # self.stream = p.open(format=pyaudio.paFloat32,
# # channels= self.audio.nchannels,
# # rate=rat,
# # output=True)
# # # print(playlist[select.value])
# # # work out the frame number of the start position
# # start_frame = self.start_time
# #
# # # print(rat, 'rat')
# # # set the buffer size
# # buffer_size = 1024
# # else:
# # # print('play')
# #
# # if self.audio:
# # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)):
# # # print(i)
# # fr = int(rat/self.fps)
# # if (i % fr) == 0:
# # pass
# # # print('dddddddddddddd')
# # # self.frame.emit()
# # # print(self.cou)
# # if i >= start_frame:
# # self.buffer.append(chunk.astype('float32').tostring())
# # if len(self.buffer) >= buffer_size:
# # self.stream.write(b''.join(self.buffer))
# # self.buffer = []
# # # print(stream.get_time())
# # # write out the remaining data
# # if self.buffer:
# # self.stream.write(b''.join(self.buffer))
# # p.terminate()
def run(self):
while True:
if self.frames1.value != self.frames.value:
self.frames1.value = self.frames.value
self.playframe.value += 1
# print('相差', self.frames.value, self.playframe.value)
# print(self.frames1)
self.frame.emit()
# print('run')
# self.playframe.
def Es(self):
pass
# print('es')
# self.terminate()
# while True:
# if abs(self.frames1 - self.n_playframe.value):
# for i in range(abs(self.frames1 - self.n_playframe.value)):
# print(1111)
self.frame.emit()
# if not self.if_au.value:
# self.frame.emit()
# return
# self.frames2 += 1
# if self.frames1 != self.frames2:
# print(self.frames1, self.frames2, self.frames.value, self.au_time)
# self.frame.emit()
# if self.audio:
# return
# if self.frames1 != self.frames.value:
# return
# self.frame.emit()
# while True:
def play(playFrame, buff):
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
while True:
# # print(playFrame.value, 'bdddd')
# if playFrame.value%25 == 0:
stream.write(b''.join(buff[int(playFrame.value)]))
# # print(self.playFrame.value, dir(self.buff), self.buff.keys())
def play_audios(playlist, select, test, start_time=0):
index = None
p = None
while True:
# print(playlist[select.value])
if index != select.value:
if p:
p.terminate()
index = select.value
video = VideoFileClip(playlist[select.value])
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# print(playlist[select.value])
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
else:
# print('play')
if audio:
o = 0
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if (i%1764) == 0:
test.value = o
o += 1
# print(o)
if i >= start_frame:
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
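# Note (added): play, play_audios and play_audio open the PyAudio stream with a
# hard-coded 44100 Hz stereo format, while play_audioss above takes the real sample
# rate from audio.fps (MoviePy). Clips whose audio is not 44100 Hz stereo will play at
# the wrong speed or with garbled channels through the hard-coded streams.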
def play_audio(path=None, start_time=0):
if path:
video = VideoFileClip(path)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio frame by frame
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
if i % (44100 / 25) == 0:
print(s, 'miao')
s += 1
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
def process_frame(playlist, playFrame, playindex, select, select_status, time, cache, frame_dict, playend, rod, hua):
# playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# print(playlist)
f = 1
op = time.value
mov = select.value
while True:
if mov == (len(playlist)-0):
if not playend.value:
select_status.value = 0
mov = select.value
op = time.value
f = time.value
cap.release()
frame_dict.clear()
continue
if f in frame_dict.keys():
f += 1
continue
playindex.value = mov
# print(select.value, mov)
# if select.value != mov and select_status.value:
# cap.release()
# mov = select.value
# frame_dict.clear()
# # select_status.value = 0
# if time.value != op:
# cap.release()
# op = time.value
# f = time.value
# playFrame.value = time.value
# frame_dict.clear()
# print(time.value, op, playFrame.value, len(frame_dict), mov)
# print(mov, select.value, playlist[mov], frame_dict.keys())
print('op' ,op, playindex.value)
# for s, n in enumerate(List[:]):
# if frams != 0:
# # print('dsds')
# for j in range(f, frams):
# # print(j ,'lllllll')
# frame_dict[j] = None
cap = cv2.VideoCapture(playlist[mov])
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.set(cv2.CAP_PROP_POS_FRAMES, op)
# print('cap', frameNum)
while cap.isOpened():
if rod.value:
continue
# # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd')
if select_status.value and hua.value == 0:
# print('sssss', select.value, time.value, select_status.value)
select_status.value = 0
mov = select.value
op = time.value
f = time.value
cap.release()
frame_dict.clear()
break
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# if frameNum < 220:
# ret, frame = cap.read()
# if ret:
# # if abs(f - playFrame.value) == 100:
# # if playFrame.value > 50:
# # if abs(f - playFrame.value) == 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# # else:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# cache.value = len(frame_dict) + playFrame.value
# # elif f < 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# else:
# mov += 1
# op = 0
# break
if len(frame_dict) < 500:
ret, frame = cap.read()
if ret:
# if abs(f - playFrame.value) == 100:
# if playFrame.value > 50:
# if abs(f - playFrame.value) == 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frame_dict[f] = frame
f += 1
cache.value = len(frame_dict) + playFrame.value
# print('duqu')
# print('ret', ret, f, len(frame_dict))
# elif f < 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
else:
print('else', op, playindex.value)
if mov == (len(playlist)-1):
playend.value = 1
mov += 1
op = 0
break
else:
continue
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# frams = frameNum
# info_dict[s] = frameNum
# for i in range(frameNum):
# while True:
# if abs(f - playFrame.value) == 100:
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
# continue
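# Summary (added) of the shared flags the reader loop above reacts to:
# - select_status / hua: a click sets select_status; the reader reseeks to time.value
#   and clears frame_dict, unless a scrub is still in progress (hua == 1).
# - rod: pause flag toggled by double-click; the reader spins without decoding.
# - playend: set once the last clip in the playlist has been fully decoded.
# - the reader stops filling frame_dict at 500 cached frames, and generate_number on
#   the UI side pops old entries as playback advances, keeping the cache bounded.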
class Window(QLabel):
def __init__(self):
super(Window, self).__init__()
self.select_ = None
# time.sleep(3)
manager = Manager()
self.cacheFrames = manager.dict()
manager = Manager()
self.cacheInfo = manager.dict()
manager = Manager()
self.playFrame = manager.Value('i', 1)
self.n_playFrame = manager.Value('i', 1)
self.playindex = manager.Value('i', 0)
self.playindexing = manager.Value('i', 0)
self.select = manager.Value('i', 0)
self.select_status = manager.Value('i', 0)
self.tiems = manager.Value('i', 0)
self.cache = manager.Value('i', 0)
self.playList = manager.list()
self.playend = manager.Value('i', 0)
self.rod = manager.Value('i', 0)
self.test = manager.Value('i', 0)
self.hua = manager.Value('i', 0)
self.playInit = manager.dict()
import os
for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')):
if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']:
self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i))
print(self.playList)
# self.playList.pop(0)
# self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb')
# manager = Manager()
# self.buff = manager.dict()
# self.generate_number1()
# self.cacheInfo = {}
self.setAlignment(Qt.AlignCenter)
self.resize(1280, 720)
self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb")
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
fps = self.cap.get(cv2.CAP_PROP_FPS)
self.get()
# self.timer = QTimer()
# self.timer1 = QTimer()
# self.timer.setInterval(1000 // fps) # set the timer interval to fire once per video frame
# self.timer.timeout.connect(self.generate_number) # connect the timer's timeout signal to the generate_number slot
time.sleep(5)
for i in range(1):
self.p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua)) # keep a reference so closeEvent can kill it
self.p.start()
# self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test))
# time.sleep(2)
# self.timer.start()
# self.ps.start()
# self.timer1.start()
# self.sss = Get(self.cache)
# self.sss.start()
self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames)
self.yy.frame.connect(self.generate_number)
self.yy.start()
@pyqtSlot()
def generate_number(self):
if self.playend.value and self.playFrame.value == self.cacheFrames.keys()[-1]+1:
# print('end', self.playFrame.value, self.cacheFrames.keys())
return
# if self.test.value:
# # print(self.playFrame.value, self.test.value)
# self.playFrame.value += self.test.value - 1
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
# self.setPixmap(pixmap)
# # del frame
# # del pixmap
# # del qImg
# if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50:
# # # print(self.playFrame.value-self.tiems.value, self.playFrame.value-300)
# self.cacheFrames.pop(self.playFrame.value-51)
# return
# print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() )
if self.playFrame.value not in self.cacheFrames:
if len(self.cacheFrames.keys()):
if self.playFrame.value > self.cacheFrames.keys()[-1]:
self.playFrame.value = self.cacheFrames.keys()[1]
elif self.playFrame.value < self.cacheFrames.keys()[0]:
self.playFrame.value = self.cacheFrames.keys()[1]
print(self.playFrame.value, 'sssssssssssssss', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value)
return
if self.playFrame.value in self.cacheFrames:
while True:
try:
frame = self.cacheFrames[self.playFrame.value]
break
except:
continue
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
self.setPixmap(pixmap)
# del frame
# del pixmap
# del qImg
if self.playFrame.value-self.tiems.value > 250 and len(self.cacheFrames) > 250 and self.playFrame.value-251 in self.cacheFrames.keys():
# print(self.playFrame.value, self.cacheFrames.keys())
self.cacheFrames.pop(self.cacheFrames.keys()[0])
if self.playindexing.value == 0:
if self.playFrame.value > self.cacheInfo[self.playindexing.value]:
self.playindexing.value += 1
elif self.playindexing.value == len(self.cacheInfo.keys())-1:
pass
else:
if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
self.playindexing.value += 1
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))
# elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]))
# self.playindexing.value += 1
if self.playindexing.value == 0:
# print('当前帧0',
# self.playindexing.value, self.playFrame.value,
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# self.playFrame.value)
self.n_playFrame.value = self.playFrame.value
dframe = self.playFrame.value
zframe = self.playFrame.value
else:
if self.playFrame.value < sum([self.cacheInfo[i] for i in range(self.playindexing.value)]):
pass
# print('当前帧点击',
# '\n播放',self.playindexing.value,
# '\n当前',self.playFrame.value,
# '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\n总帧', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value + 1
dframe = self.playFrame.value + 1
zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1
else:
# print('当前帧1',
# '\n播放',self.playindexing.value,
# '\n帧',self.playFrame.value,
# '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\n当前',self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
zframe = self.playFrame.value
# if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))):
# print('当前帧1', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# else:
# print('当前帧1', self.playindexing.value, self.playFrame.value,
# (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# print(self.playFrame.value)
print('\nactual play frame', self.playFrame.value, '\nplaying clip', self.playindexing.value, '\ncurrent frame', dframe, '\nglobal current frame', zframe, '\nshot frame', self.n_playFrame.value)
self.playFrame.value += 1
def generate_number1(self):
s = Process(target=play_audio)
s.start()
def get(self):
p = 1
ps = 1
for s, n in enumerate(self.playList):
cap = cv2.VideoCapture(n)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameNum1 = frameNum + ps
self.cacheInfo[s] = frameNum
self.playInit[s] = [[1, frameNum], [ps, frameNum1]]
ps = frameNum1 + 1
print(self.playInit.items())
def mousePressEvent(self, ev):
# self.timer.stop()
# if True:
# return
if ev.button() == Qt.LeftButton:
# print("Left mouse button pressed")
i = random.randint(0, len(self.playList)-1)
self.select_ = i
# s = random.randint(0, self.cacheInfo[i])
x = ev.pos().x()
ip = int(float(x) / self.width() * self.cacheInfo[i])
self.select_status.value = 1
print('select', i, ip, self.cacheInfo[i])
# print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames,
# self.cacheFrames.keys())
self.select.value = i
self.tiems.value = ip
self.playFrame.value = ip
self.playend.value = 0
self.playindexing.value = i
# if not self.timer.isActive():
# self.timer.start()
elif ev.button() == Qt.RightButton:
# print("Right mouse button pressed")
if self.yy.timer.isActive():
self.yy.timer.stop()
print(self.playInit[3])
frame = random.randint(0, self.playInit[self.playInit.keys()[-1]][1][1])
print(frame)
# if get()
if self.playFrame.value in self.cacheFrames:
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
frame = self.cacheFrames[self.playFrame.value]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# qImg holds the current frame as a QImage
file_path = "%s.jpg" % self.playFrame.value # file path for the saved snapshot
# save the QImage to an image file
qImg.save(file_path)
# print('stop')
else:
# print('start')
self.yy.timer.start()
def mouseMoveEvent(self, ev):
self.yy.timer.stop()
x = ev.pos().x()
ip = int(float(x) / self.width() * self.cacheInfo[self.select_])
# self.select_status.value = 1
# print('select', self.select_, ip, self.cacheInfo[self.select_])
self.tiems.value = ip - 1
self.playFrame.value = ip - 1
self.hua.value = 1
print(self.cacheFrames.keys())
if self.playFrame.value in self.cacheFrames:
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
frame = self.cacheFrames[self.playFrame.value]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
self.setPixmap(pixmap)
super().mouseMoveEvent(ev)
def mouseReleaseEvent(self, ev):
self.hua.value = 0
cap = cv2.VideoCapture(self.playList[self.select.value])
fps = cap.get(cv2.CAP_PROP_FPS)
self.yy.set(fps)
# time.sleep(1)
self.yy.timer.start()
super().mouseReleaseEvent(ev)
def mouseDoubleClickEvent(self, a0):
if self.rod.value:
self.rod.value = 0
else:
self.rod.value = 1
super().mouseDoubleClickEvent(a0)
def closeEvent(self, a0):
self.p.kill()
class Get(QThread):
num = pyqtSignal(int)
def __init__(self, cache=None):
super(Get, self).__init__()
self.cache = cache
def run(self) -> None:
while True:
self.cache.value
if __name__ == '__main__':
freeze_support()
app = QApplication([])
widget = Window()
widget.show()
# cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(frameNum)
app.exec_()
# ++++++++++++++++++++++++++++++++++++++++++++++++++++
import random
import threading
import time
import cv2
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
from multiprocessing import Process, Manager, Value, freeze_support
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
import pyaudio
from moviepy.editor import VideoFileClip
def get(frame, data):
for key, value in data.items():
if frame in list(range(value[1][0], value[1][1] + 1)):
if key == 0:
return key, value[0], frame - value[1][0] + 1
else:
return key, value[0], frame - value[1][0]
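# Note (added): with the example playInit {0: [[1, 100], [1, 101]], 1: [[1, 80], [102, 182]]},
# get(150, playInit) returns (1, [1, 80], 48): clip index 1, that clip's per-clip frame
# range, and the frame number inside the clip (150 - 102 = 48).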
def play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0):
while True:
if path:
video = VideoFileClip(path)
audio = video.audio
if not audio:
au_if.value = 0
continue
cap = cv2.VideoCapture(path)
fps = cap.get(cv2.CAP_PROP_FPS)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
audio = video.audio
if not audio:
au_if.value = 0
continue
cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb")
fps = cap.get(cv2.CAP_PROP_FPS)
p = pyaudio.PyAudio()
rat = audio.fps
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=rat,
output=True)
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio frame by frame
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
fr = int(rat/fps)
if (i % fr) == 0:
if i == 0:
# print('event', i)
event.value = 1
time.sleep(1.5)
else:
time_.value = i
frame.value += 1
if (i % rat) == 0:
# print('miao', s)
s += 1
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
class T(QThread):
frame = pyqtSignal()
def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None):
super().__init__()
self.cache = cache
self.n_playframe = n_playframe
self.frames1 = frames1
self.playframe = playframe
self.time = time
def run(self):
while True:
# print('bububuubub')
if self.playframe.value in self.cache.keys():
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
else:
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
# self.time.value = self.playframe.value
# for i in range(2):
# pass
# # print(1111, self.n_playframe.value, self.frames1.value)
# # self.frame.emit()
# self.n_playframe.value = self.frames1.value
class PlayAudio(QThread):
frame = pyqtSignal()
def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0):
super().__init__()
self.cache = cache
self.playlist = playlist
self.playframe = playframe
self.n_playframe = n_playframe
self.time = time
self.select = select
self.test = test
self.start_time = start_time
self.timer = QTimer()
cap = cv2.VideoCapture(self.playlist[self.select.value])
self.fps = cap.get(cv2.CAP_PROP_FPS)
print(self.fps)
self.timer.setInterval(int(1000 // self.fps)) # setInterval() needs an int; cap.get() returns a float fps
self.timer.timeout.connect(self.Es)
# self.timer.start()
self.audio = None
self.stream = None
self.buffer = []
self.sss = False
self.au_event = Manager().Value("i", 0)
self.frames = Manager().Value("i", 0)
self.frames1 = Manager().Value("i", 0)
self.frames2 = 0
self.au_time = Manager().Value("i", 0)
self.if_au = Manager().Value("i", 1)
if not self.sss:
# self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au))
# self.p.start()
self.p = threading.Thread(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au))
self.p.start()
self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, self.cache)
self.t.frame.connect(self.Es)
while True:
if self.sss:
self.timer.start()
break
if self.au_event.value:
# self.timer.start()
self.frame.emit()
self.playframe.value += 1
self.t.start()
self.au_event.value = 0
break
elif self.if_au.value == 0:
self.timer.start()
break
def set(self, fps):
# self.timer.stop()
self.timer.setInterval(1000 // int(fps))
# self.timer.start()
# def run(self):
# index = None
# p = None
# if self.sss:
# return
# # while True:
# # # print(self.select.value)
# # # print(self.playlist[self.select.value])
# # if index != self.select.value:
# # if self.stream:
# # self.stream.close()
# # self.buffer = []
# # self.audio = None
# # self.stream = None
# # # if p:
# # # print('clor')
# # # p.terminate()
# #
# # index = self.select.value
# # video = VideoFileClip(self.playlist[self.select.value])
# # self.audio = video.audio
# # if self.audio:
# # rat = self.audio.fps
# #
# # p = pyaudio.PyAudio()
# # # create the output stream
# # self.stream = p.open(format=pyaudio.paFloat32,
# # channels= self.audio.nchannels,
# # rate=rat,
# # output=True)
# # # print(playlist[select.value])
# # # work out the frame number of the start position
# # start_frame = self.start_time
# #
# # # print(rat, 'rat')
# # # set the buffer size
# # buffer_size = 1024
# # else:
# # # print('play')
# #
# # if self.audio:
# # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)):
# # # print(i)
# # fr = int(rat/self.fps)
# # if (i % fr) == 0:
# # pass
# # # print('dddddddddddddd')
# # # self.frame.emit()
# # # print(self.cou)
# # if i >= start_frame:
# # self.buffer.append(chunk.astype('float32').tostring())
# # if len(self.buffer) >= buffer_size:
# # self.stream.write(b''.join(self.buffer))
# # self.buffer = []
# # # print(stream.get_time())
# # # write out the remaining data
# # if self.buffer:
# # self.stream.write(b''.join(self.buffer))
# # p.terminate()
def run(self):
while True:
if self.frames1.value != self.frames.value:
self.frames1.value = self.frames.value
self.playframe.value += 1
# print('相差', self.frames.value, self.playframe.value)
# print(self.frames1)
self.frame.emit()
# print('run')
# self.playframe.
def Es(self):
pass
# print('es')
# self.terminate()
# while True:
# if abs(self.frames1 - self.n_playframe.value):
# for i in range(abs(self.frames1 - self.n_playframe.value)):
# print(1111)
self.frame.emit()
# if not self.if_au.value:
# self.frame.emit()
# return
# self.frames2 += 1
# if self.frames1 != self.frames2:
# print(self.frames1, self.frames2, self.frames.value, self.au_time)
# self.frame.emit()
# if self.audio:
# return
# if self.frames1 != self.frames.value:
# return
# self.frame.emit()
# while True:
def play(playFrame, buff):
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
while True:
# # print(playFrame.value, 'bdddd')
# if playFrame.value%25 == 0:
stream.write(b''.join(buff[int(playFrame.value)]))
# # print(self.playFrame.value, dir(self.buff), self.buff.keys())
def play_audios(playlist, select, test, start_time=0):
index = None
p = None
while True:
# print(playlist[select.value])
if index != select.value:
if p:
p.terminate()
index = select.value
video = VideoFileClip(playlist[select.value])
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# print(playlist[select.value])
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
else:
# print('play')
if audio:
o = 0
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if (i%1764) == 0:
test.value = o
o += 1
# print(o)
if i >= start_frame:
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
def play_audio(path=None, start_time=0):
if path:
video = VideoFileClip(path)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio frame by frame
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
if i % (44100 / 25) == 0:
print(s, 'miao')
s += 1
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
def process_frame(playlist, playFrame, playindex, select, select_status, times, cache, frame_dict, playend, rod, hua):
# playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# print(playlist)
f = 1
op = times.value
mov = select.value
while True:
if mov == (len(playlist)-0):
if not playend.value:
select_status.value = 0
mov = select.value
op = times.value
f = times.value
cap.release()
frame_dict.clear()
continue
playindex.value = mov
# print(select.value, mov)
# if select.value != mov and select_status.value:
# cap.release()
# mov = select.value
# frame_dict.clear()
# # select_status.value = 0
# if time.value != op:
# cap.release()
# op = time.value
# f = time.value
# playFrame.value = time.value
# frame_dict.clear()
# print(time.value, op, playFrame.value, len(frame_dict), mov)
# print(mov, select.value, playlist[mov], frame_dict.keys())
# print('op' ,op, playindex.value)
# for s, n in enumerate(List[:]):
# if frams != 0:
# # print('dsds')
# for j in range(f, frams):
# # print(j ,'lllllll')
# frame_dict[j] = None
cap = cv2.VideoCapture(playlist[mov])
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.set(cv2.CAP_PROP_POS_FRAMES, op)
# print('cap', frameNum)
while cap.isOpened():
if rod.value:
continue
# # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd')
if select_status.value and hua.value == 0:
# time.sleep(3)
# print('sssss', select.value, time.value, select_status.value)
select_status.value = 0
mov = select.value
op = times.value
f = times.value
cap.release()
frame_dict.clear()
break
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# if frameNum < 220:
# ret, frame = cap.read()
# if ret:
# # if abs(f - playFrame.value) == 100:
# # if playFrame.value > 50:
# # if abs(f - playFrame.value) == 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# # else:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# cache.value = len(frame_dict) + playFrame.value
# # elif f < 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# else:
# mov += 1
# op = 0
# break
if len(frame_dict) < 120:
ret, frame = cap.read()
if ret:
# if abs(f - playFrame.value) == 100:
# if playFrame.value > 50:
# if abs(f - playFrame.value) == 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frame_dict[f] = frame
f += 1
cache.value = len(frame_dict) + playFrame.value
# print('duqu')
# print('ret', ret, f, len(frame_dict))
# elif f < 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
else:
# print('else', op, playindex.value)
if mov == (len(playlist)-1):
playend.value = 1
mov += 1
op = 0
break
else:
continue
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# frams = frameNum
# info_dict[s] = frameNum
# for i in range(frameNum):
# while True:
# if abs(f - playFrame.value) == 100:
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
# continue
class Window(QLabel):
def __init__(self):
super(Window, self).__init__()
self.select_ = None
# time.sleep(3)
manager = Manager()
self.cacheFrames = manager.dict()
manager = Manager()
self.cacheInfo = manager.dict()
manager = Manager()
self.playFrame = manager.Value('i', 1)
self.n_playFrame = manager.Value('i', 1)
self.playindex = manager.Value('i', 0)
self.playindexing = manager.Value('i', 0)
self.select = manager.Value('i', 0)
self.select_status = manager.Value('i', 0)
self.tiems = manager.Value('i', 0)
self.cache = manager.Value('i', 0)
self.playList = manager.list()
self.playend = manager.Value('i', 0)
self.rod = manager.Value('i', 0)
self.test = manager.Value('i', 0)
self.hua = manager.Value('i', 0)
self.playInit = manager.dict()
import os
for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')):
if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']:
self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i))
print(self.playList)
# self.playList.pop(0)
# self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb')
# manager = Manager()
# self.buff = manager.dict()
# self.generate_number1()
# self.cacheInfo = {}
self.setAlignment(Qt.AlignCenter)
self.resize(1280, 720)
self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb")
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
fps = self.cap.get(cv2.CAP_PROP_FPS)
self.get()
# self.timer = QTimer()
# self.timer1 = QTimer()
# self.timer.setInterval(1000 // fps) # set the timer interval to fire once per video frame
# self.timer.timeout.connect(self.generate_number) # connect the timer's timeout signal to the generate_number slot
# time.sleep(5)
for i in range(1):
self.p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua))
self.p.start()
# self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test))
# time.sleep(2)
# self.timer.start()
# self.ps.start()
# self.timer1.start()
# self.sss = Get(self.cache)
# self.sss.start()
self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames)
self.yy.frame.connect(self.generate_number)
self.yy.start()
@pyqtSlot()
def generate_number(self):
# print(self.playend.value, self.playFrame.value , self.playInit[self.playInit.keys()[-1]][-1][-1]+1)
# print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() )
if self.playFrame.value >= self.playInit[self.playInit.keys()[-1]][-1][-1]+1:
# print('end', self.playFrame.value, self.cacheFrames.keys())
self.p.terminate()
self.cacheFrames.clear()
print('dsdsdsdsdsds')
return
# if self.test.value:
# # print(self.playFrame.value, self.test.value)
# self.playFrame.value += self.test.value - 1
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
# self.setPixmap(pixmap)
# # del frame
# # del pixmap
# # del qImg
# if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50:
# # # print(self.playFrame.value-self.tiems.value, self.playFrame.value-300)
# self.cacheFrames.pop(self.playFrame.value-51)
# return
if self.playFrame.value not in self.cacheFrames:
if len(self.cacheFrames.keys()) and not self.playend.value:
try:
if self.playFrame.value > self.cacheFrames.keys()[-1]:
self.playFrame.value = self.cacheFrames.keys()[0]
elif self.playFrame.value < self.cacheFrames.keys()[0]:
self.playFrame.value = self.cacheFrames.keys()[0]
except:
self.playFrame.value = self.tiems.value
# print(self.playFrame.value, 'sssssssssssssss', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value)
return
if self.playFrame.value in self.cacheFrames:
try:
frame = self.cacheFrames[self.playFrame.value]
except:
return
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
self.setPixmap(pixmap)
# file_path = "%s_%s.jpg" % (self.playList[self.playindexing], self.playFrame.value) # 保存图片的文件路径
#
# # 保存QImage对象为图片文件
# qImg.save(file_path)
# del frame
# del pixmap
# del qImg
# print('key1')
if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) > 50 and self.playFrame.value-51 in self.cacheFrames.keys():
# print(self.playFrame.value, self.cacheFrames.keys())
self.cacheFrames.pop(self.cacheFrames.keys()[0])
if self.playindexing.value == 0:
if self.playFrame.value > self.cacheInfo[self.playindexing.value]:
self.playindexing.value += 1
elif self.playindexing.value == len(self.cacheInfo.keys())-1:
pass
else:
if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
self.playindexing.value += 1
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))
# elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]))
# self.playindexing.value += 1
if self.playindexing.value == 0:
# print('当前帧0',
# self.playindexing.value, self.playFrame.value,
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# self.playFrame.value)
self.n_playFrame.value = self.playFrame.value
dframe = self.playFrame.value
zframe = self.playFrame.value
else:
if self.playFrame.value < sum([self.cacheInfo[i] for i in range(self.playindexing.value)]):
pass
# print('当前帧点击',
# '\n播放',self.playindexing.value,
# '\n当前',self.playFrame.value,
# '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\n总帧', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value + 1
dframe = self.playFrame.value + 1
zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1
else:
# print('当前帧1',
# '\n播放',self.playindexing.value,
# '\n帧',self.playFrame.value,
# '\n总',sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\n当前',self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
zframe = self.playFrame.value
# if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))):
# print('当前帧1', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# else:
# print('当前帧1', self.playindexing.value, self.playFrame.value,
# (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# print(self.playFrame.value)
# print('\n实际播放', self.playFrame.value, '\n播放',self.playindexing.value, '\n当前帧', dframe, '\n总的当前帧', zframe, '\n镜头帧', self.n_playFrame.value)
self.playFrame.value += 1
# print(self.playFrame.value)
def generate_number1(self):
s = Process(target=play_audio)
s.start()
def get(self):
p = 1
ps = 1
for s, n in enumerate(self.playList):
cap = cv2.VideoCapture(n)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameNum1 = frameNum + ps
self.cacheInfo[s] = frameNum
self.playInit[s] = [[1, frameNum], [ps, frameNum1]]
ps = frameNum1 + 1
print(self.playInit.items())
def mousePressEvent(self, ev):
# self.timer.stop()
# if True:
# return
if ev.button() == Qt.LeftButton:
# # print("Left mouse button pressed")
# # i = random.randint(0, len(self.playList)-1)
# # self.select_ = i
#
#
# # # s = random.randint(0, self.cacheInfo[i])
x = ev.pos().x()
frames = int(float(x) / self.width() * self.playInit[self.playInit.keys()[-1]][-1][-1])
# print(frames)
# # self.select_status.value = 1
# # print('select', i, ip, self.cacheInfo[i])
# # # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames,
# # # self.cacheFrames.keys())
#
# # frames = random.randint(0, self.playInit[3][1][1])
print(frames)
try:
print(self.playInit[self.playInit.keys()[-1]][-1][-1])
except:
pass
if get(frames, self.playInit):
sele = get(frames, self.playInit)
print(sele)
# if frames in self.cacheFrames:
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[frames]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# # 假设qImg是您的QImage对象
# file_path = "%s_%s.jpg" % (sele[-1], frames) # 保存图片的文件路径
#
# # 保存QImage对象为图片文件
# qImg.save(file_path)
# # print('stop')
#
self.select_status.value = 1
self.select_ = sele[0]
self.select.value = sele[0]
self.tiems.value = sele[-1] -1
self.playFrame.value = sele[-1] -1
self.playend.value = 0
self.playindexing.value = sele[0]
# print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value)
# # if not self.timer.isActive():
# # self.timer.start()
time.sleep(2)
print(frames, sele[-1], self.cacheFrames.keys())
if sele[-1]-1 in self.cacheFrames:
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
frame = self.cacheFrames[sele[-1]-1]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# qImg holds the current frame as a QImage
print('saving', self.playList, self.select.value, self.playList[self.select.value])
file_path = "%s_%s_%s.jpg" % (self.playList[self.select.value].split('/')[-1], sele[-1], frames) # file path for the saved snapshot
# save the QImage to an image file
qImg.save(file_path)
# # print("Left mouse button pressed")
# i = random.randint(0, len(self.playList) - 1)
# self.select_ = i
# # s = random.randint(0, self.cacheInfo[i])
# x = ev.pos().x()
# ip = int(float(x) / self.width() * self.cacheInfo[i])
# self.select_status.value = 1
# print('select', i, ip, self.cacheInfo[i])
# # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames,
# # self.cacheFrames.keys())
# self.select.value = i
# self.tiems.value = ip
# self.playFrame.value = ip
# self.playend.value = 0
# self.playindexing.value = i
# print('select', i, ip, self.cacheInfo[i])
# print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value)
# # if not self.timer.isActive():
# # self.timer.start()
elif ev.button() == Qt.RightButton:
# print("Right mouse button pressed")
if self.yy.timer.isActive():
self.yy.timer.stop()
# print(self.playInit[3])
else:
# print('start')
self.yy.timer.start()
def mouseMoveEvent(self, ev):
# self.yy.timer.stop()
# # x = ev.pos().x()
# # ip = int(float(x) / self.width() * self.cacheInfo[self.select_])
# # self.select_status.value = 1
# # print('select', self.select_, ip, self.cacheInfo[self.select_])
# # self.tiems.value = ip - 1
# # self.playFrame.value = ip - 1
# # self.hua.value = 1
# # print(self.cacheFrames.keys())
# if self.playFrame.value in self.cacheFrames:
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
# self.setPixmap(pixmap)
super().mouseMoveEvent(ev)
def mouseReleaseEvent(self, ev):
# self.hua.value = 0
print(self.playList[self.select.value])
cap = cv2.VideoCapture(self.playList[self.select.value])
fps = cap.get(cv2.CAP_PROP_FPS)
self.yy.set(fps)
# time.sleep(1)
self.yy.timer.start()
super().mouseReleaseEvent(ev)
def mouseDoubleClickEvent(self, a0):
if self.rod.value:
self.rod.value = 0
else:
self.rod.value = 1
super().mouseDoubleClickEvent(a0)
def closeEvent(self, a0):
# self.yy.terminate()
# self.yy.deleteLater()
# self.p.terminate()
self.p.join()
# self.p.close()
# self.p.kill()
# self.yy.p.terminate()
self.yy.p.join()
# self.yy.p.close()
# self.yy.p.kill()
super().closeEvent(a0)
# sys.exit()
class Get(QThread):
num = pyqtSignal(int)
def __init__(self, cache=None):
super(Get, self).__init__()
self.cache = cache
def run(self) -> None:
while True:
self.cache.value
if __name__ == '__main__':
freeze_support()
app = QApplication([])
widget = Window()
widget.show()
# cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(frameNum)
app.exec_()
# +++++++++++++++++++++++++++++++++++++++++++
import random
import threading
import time
import cv2
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
from multiprocessing import Process, Manager, Value, freeze_support
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
import pyaudio
from moviepy.editor import VideoFileClip
def get(frame, data):
for key, value in data.items():
if frame in list(range(value[1][0], value[1][1] + 1)):
if key == 0:
return key, value[0], frame - value[1][0] + 1
else:
return key, value[0], frame - value[1][0]
def play_audioss(path=None, frame=None, event=None, time_=None, au_if=None, start_time=0):
while True:
if path:
video = VideoFileClip(path)
audio = video.audio
if not audio:
au_if.value = 0
continue
cap = cv2.VideoCapture(path)
fps = cap.get(cv2.CAP_PROP_FPS)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
audio = video.audio
if not audio:
au_if.value = 0
continue
cap = cv2.VideoCapture("[J2] 銀魂 002.rmvb")
fps = cap.get(cv2.CAP_PROP_FPS)
p = pyaudio.PyAudio()
rat = audio.fps
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=rat,
output=True)
# work out the frame number of the start position
start_frame = start_time
# set the buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio frame by frame
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
fr = int(rat/fps)
if (i % fr) == 0:
if i == 0:
# print('event', i)
event.value = 1
time.sleep(1.5)
else:
time_.value = i
frame.value += 1
if (i % rat) == 0:
# print('miao', s)
s += 1
buffer.append(chunk.astype('float32').tostring())
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# write out the remaining data
if buffer:
stream.write(b''.join(buffer))
p.terminate()
class T(QThread):
frame = pyqtSignal()
def __init__(self, n_playframe=None, frames1=None, playframe=None, time=None, cache=None):
super().__init__()
self.cache = cache
self.n_playframe = n_playframe
self.frames1 = frames1
self.playframe = playframe
self.time = time
def run(self):
while True:
# print('bububuubub')
if self.playframe.value in self.cache.keys():
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
else:
if self.playframe.value < self.frames1.value - 1:
self.playframe.value += 1
elif self.playframe.value > self.frames1.value:
self.playframe.value -= 1
# self.time.value = self.playframe.value
# for i in range(2):
# pass
# # print(1111, self.n_playframe.value, self.frames1.value)
# # self.frame.emit()
# self.n_playframe.value = self.frames1.value
class PlayAudio(QThread):
frame = pyqtSignal()
def __init__(self, playlist, playframe, select, test, n_playframe, time, cache, start_time=0):
super().__init__()
self.cache = cache
self.playlist = playlist
self.playframe = playframe
self.n_playframe = n_playframe
self.time = time
self.select = select
self.test = test
self.start_time = start_time
self.timer = QTimer()
cap = cv2.VideoCapture(self.playlist[self.select.value])
self.fps = cap.get(cv2.CAP_PROP_FPS)
print(self.fps)
self.timer.setInterval(int(1000 // self.fps)) # setInterval() needs an int; cap.get() returns a float fps
self.timer.timeout.connect(self.Es)
# self.timer.start()
self.audio = None
self.stream = None
self.buffer = []
self.sss = False
self.au_event = Manager().Value("i", 0)
self.frames = Manager().Value("i", 0)
self.frames1 = Manager().Value("i", 0)
self.frames2 = 0
self.au_time = Manager().Value("i", 0)
self.if_au = Manager().Value("i", 1)
if not self.sss:
self.p = Process(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au))
self.p.start()
#
# self.p = threading.Thread(target=play_audioss, args=(self.playlist[self.select.value], self.frames, self.au_event, self.au_time, self.if_au))
# self.p.start()
self.t = T(self.n_playframe, self.frames1, self.playframe, self.time, self.cache)
self.t.frame.connect(self.Es)
while True:
if self.sss:
self.timer.start()
break
if self.au_event.value:
# self.timer.start()
self.frame.emit()
self.playframe.value += 1
self.t.start()
self.au_event.value = 0
break
elif self.if_au.value == 0:
self.timer.start()
break
def set(self, fps):
# self.timer.stop()
self.timer.setInterval(1000 // int(fps))
# self.timer.start()
# def run(self):
# index = None
# p = None
# if self.sss:
# return
# # while True:
# # # print(self.select.value)
# # # print(self.playlist[self.select.value])
# # if index != self.select.value:
# # if self.stream:
# # self.stream.close()
# # self.buffer = []
# # self.audio = None
# # self.stream = None
# # # if p:
# # # print('clor')
# # # p.terminate()
# #
# # index = self.select.value
# # video = VideoFileClip(self.playlist[self.select.value])
# # self.audio = video.audio
# # if self.audio:
# # rat = self.audio.fps
# #
# # p = pyaudio.PyAudio()
# # # create the output stream
# # self.stream = p.open(format=pyaudio.paFloat32,
# # channels= self.audio.nchannels,
# # rate=rat,
# # output=True)
# # # print(playlist[select.value])
# # # frame offset to start from
# # start_frame = self.start_time
# #
# # # print(rat, 'rat')
# # # buffer size
# # buffer_size = 1024
# # else:
# # # print('play')
# #
# # if self.audio:
# # for i, chunk in enumerate(self.audio.iter_frames(with_times=False)):
# # # print(i)
# # fr = int(rat/self.fps)
# # if (i % fr) == 0:
# # pass
# # # print('dddddddddddddd')
# # # self.frame.emit()
# # # print(self.cou)
# # if i >= start_frame:
# # self.buffer.append(chunk.astype('float32').tostring())
# # if len(self.buffer) >= buffer_size:
# # self.stream.write(b''.join(self.buffer))
# # self.buffer = []
# # # print(stream.get_time())
# # # flush remaining data
# # if self.buffer:
# # self.stream.write(b''.join(self.buffer))
# # p.terminate()
def run(self):
while True:
if self.frames1.value != self.frames.value:
self.frames1.value = self.frames.value
self.playframe.value += 1
# print('相差', self.frames.value, self.playframe.value)
# print(self.frames1)
self.frame.emit()
# print('run')
# self.playframe.
def Es(self):
pass
# print('es')
# self.terminate()
# while True:
# if abs(self.frames1 - self.n_playframe.value):
# for i in range(abs(self.frames1 - self.n_playframe.value)):
# print(1111)
self.frame.emit()
# if not self.if_au.value:
# self.frame.emit()
# return
# self.frames2 += 1
# if self.frames1 != self.frames2:
# print(self.frames1, self.frames2, self.frames.value, self.au_time)
# self.frame.emit()
# if self.audio:
# return
# if self.frames1 != self.frames.value:
# return
# self.frame.emit()
# while True:
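# play: minimal PyAudio loop that keeps writing pre-decoded audio chunks from the
# shared `buff` dict, indexed by the current play frame.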
def play(playFrame, buff):
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
while True:
# # print(playFrame.value, 'bdddd')
# if playFrame.value%25 == 0:
stream.write(b''.join(buff[int(playFrame.value)]))
# # print(self.playFrame.value, dir(self.buff), self.buff.keys())
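# play_audios: per-playlist audio loop; whenever the selected entry changes it tears
# down the old PyAudio stream, opens a new one for that clip's audio, and streams it
# chunk by chunk while publishing a coarse progress counter through `test`.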
def play_audios(playlist, select, test, start_time=0):
index = None
p = None
while True:
# print(playlist[select.value])
if index != select.value:
if p:
p.terminate()
index = select.value
video = VideoFileClip(playlist[select.value])
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# print(playlist[select.value])
# frame offset to start playback from
start_frame = start_time
# buffer size (number of chunks collected before a write)
buffer_size = 1024
buffer = []
s = 0
else:
# print('play')
if audio:
o = 0
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if (i%1764) == 0:
test.value = o
o += 1
# print(o)
if i >= start_frame:
buffer.append(chunk.astype('float32').tobytes())  # tobytes(): tostring() is a deprecated NumPy alias
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# flush any remaining buffered samples
if buffer:
stream.write(b''.join(buffer))
p.terminate()
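# play_audio: simplest variant; plays the full audio track of a single clip from
# `start_time`, assuming 44.1 kHz stereo float32 output.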
def play_audio(path=None, start_time=0):
if path:
video = VideoFileClip(path)
else:
video = VideoFileClip("[J2] 銀魂 002.rmvb")
audio = video.audio
p = pyaudio.PyAudio()
# create the output stream
stream = p.open(format=pyaudio.paFloat32,
channels=2,
rate=44100,
output=True)
# frame offset to start playback from
start_frame = start_time
# buffer size
buffer_size = 1024
buffer = []
s = 0
# play the audio chunk by chunk
for i, chunk in enumerate(audio.iter_frames(with_times=False)):
# print(i)
if i >= start_frame:
if i % (44100 / 25) == 0:
print(s, 'miao')
s += 1
buffer.append(chunk.astype('float32').tobytes())  # tobytes(): tostring() is a deprecated NumPy alias
if len(buffer) >= buffer_size:
stream.write(b''.join(buffer))
buffer = []
# print(stream.get_time())
# flush any remaining buffered samples
if buffer:
stream.write(b''.join(buffer))
p.terminate()
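# process_frame: decoder subprocess. It walks the playlist, reads frames with OpenCV
# into the shared `frame_dict` cache while the cache holds fewer than 120 frames, and
# reacts to the shared flags: `select_status`/`times` request a seek or clip switch,
# `rod` pauses decoding, `hua` blocks switching during a drag, and `playend` is set
# once the last clip has been read to the end.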
def process_frame(playlist, playFrame, playindex, select, select_status, times, cache, frame_dict, playend, rod, hua):
# playlist.append('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# print(playlist)
f = 1
op = times.value
mov = select.value
while True:
if mov == len(playlist):
if not playend.value:
select_status.value = 0
mov = select.value
op = times.value
f = times.value
cap.release()
frame_dict.clear()
continue
playindex.value = mov
# print(select.value, mov)
# if select.value != mov and select_status.value:
# cap.release()
# mov = select.value
# frame_dict.clear()
# # select_status.value = 0
# if time.value != op:
# cap.release()
# op = time.value
# f = time.value
# playFrame.value = time.value
# frame_dict.clear()
# print(time.value, op, playFrame.value, len(frame_dict), mov)
# print(mov, select.value, playlist[mov], frame_dict.keys())
# print('op' ,op, playindex.value)
# for s, n in enumerate(List[:]):
# if frams != 0:
# # print('dsds')
# for j in range(f, frams):
# # print(j ,'lllllll')
# frame_dict[j] = None
cap = cv2.VideoCapture(playlist[mov])
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.set(cv2.CAP_PROP_POS_FRAMES, op)
# print('cap', frameNum)
while cap.isOpened():
if rod.value:
continue
# # print(playFrame.value, f, len(frame_dict), 'dfdfdfdfdfd')
if select_status.value and hua.value == 0:
# time.sleep(3)
# print('sssss', select.value, time.value, select_status.value)
select_status.value = 0
mov = select.value
op = times.value
f = times.value
cap.release()
frame_dict.clear()
break
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# if frameNum < 220:
# ret, frame = cap.read()
# if ret:
# # if abs(f - playFrame.value) == 100:
# # if playFrame.value > 50:
# # if abs(f - playFrame.value) == 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# # else:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# cache.value = len(frame_dict) + playFrame.value
# # elif f < 100:
# # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# # frame_dict[f] = frame
# # f += 1
# else:
# mov += 1
# op = 0
# break
if len(frame_dict) < 120:
ret, frame = cap.read()
if ret:
# if abs(f - playFrame.value) == 100:
# if playFrame.value > 50:
# if abs(f - playFrame.value) == 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frame_dict[f] = frame
f += 1
cache.value = len(frame_dict) + playFrame.value
# print('duqu')
# print('ret', ret, f, len(frame_dict))
# elif f < 100:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
else:
# print('else', op, playindex.value)
if mov == (len(playlist)-1):
playend.value = 1
mov += 1
op = 0
break
else:
continue
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# frams = frameNum
# info_dict[s] = frameNum
# for i in range(frameNum):
# while True:
# if abs(f - playFrame.value) == 100:
# ret, frame = cap.read()
# if ret:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# frame_dict[f] = frame
# f += 1
# else:
# continue
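# Window: QLabel-based player widget. It builds the playlist, precomputes per-clip
# frame ranges in get(), spawns the process_frame decoder process and the PlayAudio
# thread, and paints the cached frame for the current position in generate_number().
# Mouse events drive seeking (left click, which also saves a snapshot of the target
# frame), pause/resume (right click) and a decode-pause toggle (double click).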
class Window(QLabel):
def __init__(self):
super(Window, self).__init__()
self.select_ = None
# time.sleep(3)
manager = Manager()
self.cacheFrames = manager.dict()
manager = Manager()
self.cacheInfo = manager.dict()
manager = Manager()
self.playFrame = manager.Value('i', 1)
self.n_playFrame = manager.Value('i', 1)
self.playindex = manager.Value('i', 0)
self.playindexing = manager.Value('i', 0)
self.select = manager.Value('i', 0)
self.select_status = manager.Value('i', 0)
self.tiems = manager.Value('i', 0)
self.cache = manager.Value('i', 0)
self.playList = manager.list()
self.playend = manager.Value('i', 0)
self.rod = manager.Value('i', 0)
self.test = manager.Value('i', 0)
self.hua = manager.Value('i', 0)
self.playInit = manager.dict()
import os
for s, i in enumerate(os.listdir('/home/jcen/Videos/新建文件夹')):
if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']:
self.playList.append(os.path.join(r'/home/jcen/Videos/新建文件夹', i))
print(self.playList)
# self.playList.pop(0)
# self.playList.append('/home/jcen/Videos/新建文件夹/[J2] 銀魂 002.rmvb')
# manager = Manager()
# self.buff = manager.dict()
# self.generate_number1()
# self.cacheInfo = {}
self.setAlignment(Qt.AlignCenter)
self.resize(1280, 720)
self.cap = cv2.VideoCapture(r"/home/jcen/Videos/[J2] 銀魂 002.rmvb")
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
fps = self.cap.get(cv2.CAP_PROP_FPS)
self.get()
# self.timer = QTimer()
# self.timer1 = QTimer()
# self.timer.setInterval(1000 // fps) # set the timer interval so it fires once per frame (24 times a second)
# self.timer.timeout.connect(self.generate_number) # connect the timer's timeout signal to the frame-drawing slot
# time.sleep(5)
for i in range(1):
self.p = Process(target=process_frame, args=(self.playList, self.playFrame, self.playindex, self.select, self.select_status, self.tiems, self.cache, self.cacheFrames, self.playend, self.rod, self.hua))
self.p.start()
# self.ps = Process(target=play_audios, args=(self.playList, self.playindex, self.test))
# time.sleep(2)
# self.timer.start()
# self.ps.start()
# self.timer1.start()
# self.sss = Get(self.cache)
# self.sss.start()
self.yy = PlayAudio(self.playList, self.playFrame, self.playindex, self.test, self.n_playFrame, self.tiems, self.cacheFrames)
self.yy.frame.connect(self.generate_number)
self.yy.start()
@pyqtSlot()
def generate_number(self):
# print(self.playend.value, self.playFrame.value , self.playInit[self.playInit.keys()[-1]][-1][-1]+1)
# print(len(self.cacheFrames.keys()), self.playFrame.value , self.playFrame.value in self.cacheFrames, len(self.cacheFrames.keys()),self.cacheFrames.keys() )
if self.playFrame.value >= self.playInit[self.playInit.keys()[-1]][-1][-1]+1:
# print('end', self.playFrame.value, self.cacheFrames.keys())
self.p.terminate()
self.cacheFrames.clear()
print('playback reached the end of the playlist')
return
# if self.test.value:
# # print(self.playFrame.value, self.test.value)
# self.playFrame.value += self.test.value - 1
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
# self.setPixmap(pixmap)
# # del frame
# # del pixmap
# # del qImg
# if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) >50:
# # # print(self.playFrame.value-self.tiems.value, self.playFrame.value-300)
# self.cacheFrames.pop(self.playFrame.value-51)
# return
if self.playFrame.value not in self.cacheFrames:
if len(self.cacheFrames.keys()) and not self.playend.value:
try:
if self.playFrame.value > self.cacheFrames.keys()[-1]:
self.playFrame.value = self.cacheFrames.keys()[0]
elif self.playFrame.value < self.cacheFrames.keys()[0]:
self.playFrame.value = self.cacheFrames.keys()[0]
except:
self.playFrame.value = self.tiems.value
# print(self.playFrame.value, 'sssssssssssssss', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), self.playindexing.value)
return
if self.playFrame.value in self.cacheFrames:
try:
frame = self.cacheFrames[self.playFrame.value]
except:
return
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
self.setPixmap(pixmap)
# file_path = "%s_%s.jpg" % (self.playList[self.playindexing], self.playFrame.value) # 保存图片的文件路径
#
# # 保存QImage对象为图片文件
# qImg.save(file_path)
# del frame
# del pixmap
# del qImg
# print('key1')
if self.playFrame.value-self.tiems.value > 50 and len(self.cacheFrames) > 50 and self.playFrame.value-51 in self.cacheFrames.keys():
# print(self.playFrame.value, self.cacheFrames.keys())
self.cacheFrames.pop(self.cacheFrames.keys()[0])
if self.playindexing.value == 0:
if self.playFrame.value > self.cacheInfo[self.playindexing.value]:
self.playindexing.value += 1
elif self.playindexing.value == len(self.cacheInfo.keys())-1:
pass
else:
if self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
self.playindexing.value += 1
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))
# elif self.playFrame.value > sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]):
# print('z', sum([self.cacheInfo[i] for i in range(self.playindexing.value + 1)]))
# self.playindexing.value += 1
if self.playindexing.value == 0:
# print('current frame (clip 0)',
# self.playindexing.value, self.playFrame.value,
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# self.playFrame.value)
self.n_playFrame.value = self.playFrame.value
dframe = self.playFrame.value
zframe = self.playFrame.value
else:
if self.playFrame.value < sum([self.cacheInfo[i] for i in range(self.playindexing.value)]):
pass
# print('current frame (after click)',
# '\nclip', self.playindexing.value,
# '\ncurrent', self.playFrame.value,
# '\ntotal', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\nglobal frame', self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value + 1
dframe = self.playFrame.value + 1
zframe = self.playFrame.value + sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) + 1
else:
# print('current frame (clip > 0)',
# '\nclip', self.playindexing.value,
# '\nframe', self.playFrame.value,
# '\ntotal', sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# '\ncurrent', self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
# )
self.n_playFrame.value = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
dframe = self.playFrame.value - sum([self.cacheInfo[i] for i in range(self.playindexing.value)])
zframe = self.playFrame.value
# if self.playFrame.value != abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))):
# print('current frame (clip > 0)', self.playindexing.value, self.playFrame.value, (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])), sum([self.cacheInfo[i] for i in range(self.playindexing.value)]), abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# else:
# print('current frame (clip > 0)', self.playindexing.value, self.playFrame.value,
# (sum([self.cacheInfo[i] for i in range(self.playindexing.value)])),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]),
# sum([self.cacheInfo[i] for i in range(self.playindexing.value)]) - abs(self.playFrame.value - (sum([self.cacheInfo[i] for i in range(self.playindexing.value)]))))
# print(self.playFrame.value)
# print('\nactual play frame', self.playFrame.value, '\nclip', self.playindexing.value, '\ncurrent frame', dframe, '\nglobal current frame', zframe, '\nshot frame', self.n_playFrame.value)
self.playFrame.value += 1
# print(self.playFrame.value)
def generate_number1(self):
s = Process(target=play_audio)
s.start()
def get(self):
p = 1
ps = 1
for s, n in enumerate(self.playList):
cap = cv2.VideoCapture(n)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameNum1 = frameNum + ps
self.cacheInfo[s] = frameNum
self.playInit[s] = [[1, frameNum], [ps, frameNum1]]
ps = frameNum1 + 1
print(self.playInit.items())
def mousePressEvent(self, ev):
# self.timer.stop()
# if True:
# return
if ev.button() == Qt.LeftButton:
# # print("Left mouse button pressed")
# # i = random.randint(0, len(self.playList)-1)
# # self.select_ = i
#
#
# # # s = random.randint(0, self.cacheInfo[i])
x = ev.pos().x()
frames = int(float(x) / self.width() * self.playInit[self.playInit.keys()[-1]][-1][-1])
# print(frames)
# # self.select_status.value = 1
# # print('select', i, ip, self.cacheInfo[i])
# # # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames,
# # # self.cacheFrames.keys())
#
# # frames = random.randint(0, self.playInit[3][1][1])
print(frames)
try:
print(self.playInit[self.playInit.keys()[-1]][-1][-1])
except:
pass
sele = get(frames, self.playInit)
if sele:
print(sele)
# if frames in self.cacheFrames:
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[frames]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# # qImg is assumed to be the QImage object to save
# file_path = "%s_%s.jpg" % (sele[-1], frames) # file path for the snapshot
#
# # save the QImage object as an image file
# qImg.save(file_path)
# # print('stop')
#
self.select_status.value = 1
self.select_ = sele[0]
self.select.value = sele[0]
self.tiems.value = sele[-1] -1
self.playFrame.value = sele[-1] -1
self.playend.value = 0
self.playindexing.value = sele[0]
# print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value)
# # if not self.timer.isActive():
# # self.timer.start()
time.sleep(2)
print(frames, sele[-1], self.cacheFrames.keys())
if sele[-1]-1 in self.cacheFrames:
# if self.counter/1000 == 0:
# # print(len(self.cacheFrames), 'len(self.cacheFrames)')
frame = self.cacheFrames[sele[-1]-1]
self.h, self.w, ch = frame.shape
bytesPerLine = ch * self.w
# frame = cv2.resize(frame, (1280, 720))
qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# qImg now holds the clicked frame as a QImage
print('saving', self.playList, self.select.value, self.playList[self.select.value])
file_path = "%s_%s_%s.jpg" % (self.playList[self.select.value].split('/')[-1], sele[-1], frames) # file path for the snapshot
# save the QImage object as an image file
qImg.save(file_path)
# # print("Left mouse button pressed")
# i = random.randint(0, len(self.playList) - 1)
# self.select_ = i
# # s = random.randint(0, self.cacheInfo[i])
# x = ev.pos().x()
# ip = int(float(x) / self.width() * self.cacheInfo[i])
# self.select_status.value = 1
# print('select', i, ip, self.cacheInfo[i])
# # print(len(self.cacheFrames.keys()), self.playFrame.value, self.playFrame.value in self.cacheFrames,
# # self.cacheFrames.keys())
# self.select.value = i
# self.tiems.value = ip
# self.playFrame.value = ip
# self.playend.value = 0
# self.playindexing.value = i
# print('select', i, ip, self.cacheInfo[i])
# print(self.select.value, self.tiems.value, self.playFrame.value, self.playindexing.value)
# # if not self.timer.isActive():
# # self.timer.start()
elif ev.button() == Qt.RightButton:
# print("Right mouse button pressed")
if self.yy.timer.isActive():
self.yy.timer.stop()
# print(self.playInit[3])
else:
# print('start')
self.yy.timer.start()
def mouseMoveEvent(self, ev):
# self.yy.timer.stop()
# # x = ev.pos().x()
# # ip = int(float(x) / self.width() * self.cacheInfo[self.select_])
# # self.select_status.value = 1
# # print('select', self.select_, ip, self.cacheInfo[self.select_])
# # self.tiems.value = ip - 1
# # self.playFrame.value = ip - 1
# # self.hua.value = 1
# # print(self.cacheFrames.keys())
# if self.playFrame.value in self.cacheFrames:
# # if self.counter/1000 == 0:
# # # print(len(self.cacheFrames), 'len(self.cacheFrames)')
# frame = self.cacheFrames[self.playFrame.value]
# self.h, self.w, ch = frame.shape
# bytesPerLine = ch * self.w
# # frame = cv2.resize(frame, (1280, 720))
# qImg = QImage(frame.data, self.w, self.h, bytesPerLine, QImage.Format_RGB888)
# pixmap = QPixmap.fromImage(qImg).scaled(self.width(), self.height())
# self.setPixmap(pixmap)
super().mouseMoveEvent(ev)
def mouseReleaseEvent(self, ev):
# self.hua.value = 0
print(self.playList[self.select.value])
cap = cv2.VideoCapture(self.playList[self.select.value])
fps = cap.get(cv2.CAP_PROP_FPS)
self.yy.set(fps)
# time.sleep(1)
self.yy.timer.start()
super().mouseReleaseEvent(ev)
def mouseDoubleClickEvent(self, a0):
if self.rod.value:
self.rod.value = 0
else:
self.rod.value = 1
super().mouseDoubleClickEvent(a0)
def closeEvent(self, a0):
# self.yy.terminate()
# self.yy.deleteLater()
self.p.terminate()  # the decoder process loops forever, so stop it before joining
self.p.join()
# self.p.close()
# self.p.kill()
self.yy.p.terminate()  # same for the audio process started by PlayAudio
self.yy.p.join()
# self.yy.p.close()
# self.yy.p.kill()
super().closeEvent(a0)
# sys.exit()
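# Get: trivial QThread that busy-loops reading the shared cache value; in this
# version it is only referenced from commented-out code above.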
class Get(QThread):
num = pyqtSignal(int)
def __init__(self, cache=None):
super(Get, self).__init__()
self.cache = cache
def run(self) -> None:
while True:
self.cache.value
if __name__ == '__main__':
freeze_support()
app = QApplication([])
widget = Window()
widget.show()
# cap = cv2.VideoCapture('/home/jcen/Videos/[J2] 銀魂 002.rmvb')
# frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(frameNum)
app.exec_()
# +++++++++++++++++++++++++++++++++++++++++++++++
# coding: utf-8
from multiprocessing import Manager, Process
import cv2
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication
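# PlayInit (standalone prototype): probes every clip of the playlist in its own
# subprocess to read the frame count with OpenCV, then derives each clip's local
# frame range and its span on the concatenated global timeline in play_list_data;
# `end` flips to True once every clip has been measured.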
class PlayInit(QThread):
def __init__(self, ):
super().__init__()
self.play_list_data = Manager().dict()
self.playInfo = Manager().dict()
self.play_list = None
self.update = False
self.end = False
def set(self, play_list):
self.play_list = play_list
self.play_list_data.clear()
self.update = True
def run(self):
while True:
if self.play_list:
if self.update:
for s, i in enumerate(self.play_list):
# print(i)
self.p = Process(target=self.init, args=(s, i,))
self.p.start()
self.update = False
while True:
if len(self.play_list_data.keys()) == len(self.play_list):
# ps = 1
# for i in sorted(self.play_list_data.keys()):
# # print(i, '0000')
# if i == 0:
# # print('0', i)
# self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [1, self.play_list_data[i][0][1]]]
# else:
# # print('dd', i-1, self.play_list_data[i-1])
# frameNum1 = self.play_list_data[i-1][0][1] + 1
# self.play_list_data[i] = [[self.play_list_data[i][0][0], self.play_list_data[i][0][1]], [frameNum1, frameNum1+self.play_list_data[i][0][1]]]
# ps = frameNum1 + 1
# # print(self.play_list_data[i])
self.end = True
# print('ssssssss')
break
def init(self, index, play):
# print(index)
cap = cv2.VideoCapture(play)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.playInfo[index] = frameNum
# self.cacheInfo[s] = frameNum
self.play_list_data[index] = [[1, frameNum]]
while True:
try:
self.play_list_data[0]
if index == 0:
# print('0', index)
self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]],
[1, self.play_list_data[index][0][1]]]
else:
if index == 1:
# print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)]))
frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1
# print(frameNum1)
self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]],
[frameNum1, frameNum1 + self.play_list_data[index][0][1]]]
else:
# print('dd', index-1, self.play_list_data[index-1], self.play_list_data[index] , sum([self.play_list_data[i][0][1] for i in range(index)]))
frameNum1 = sum([self.play_list_data[i][0][1] for i in range(index)]) + 1
# print(frameNum1)
self.play_list_data[index] = [[self.play_list_data[index][0][0], self.play_list_data[index][0][1]],
[frameNum1+1, frameNum1 + self.play_list_data[index][0][1]]]
break
except:
# print(index)
continue
if __name__ == '__main__':
import sys
import os
app = QApplication([])
playList = []
for s, i in enumerate(os.listdir('/home/jcen/Videos')):
if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']:
playList.append(os.path.join(r'/home/jcen/Videos', i))
s = PlayInit()
s.start()
s.set(playList)
while True:
if s.end:
print(s.play_list_data.items())
print(s.playInfo.items())
break
# print(playList)
app.exec_()
# ++++++++++++++++++++++++++++
# coding: utf-8
import time
from multiprocessing import Manager, Process
import cv2
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication
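# PlayInit (extended prototype): same per-clip probing as above, but it also records
# each clip's FPS in playListFps, and once initialisation completes its run() loop
# emits `frame` roughly 24 times per second to act as a playback clock; process_frame
# and run_ below are leftover experiments with managing worker subprocesses.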
class PlayInit(QThread):
frame = pyqtSignal()
def __init__(self, ):
super().__init__()
self.update = False
self.end = False
manager = Manager()
self.playList = manager.list()
self.playList_data = manager.dict()
self.playInfo = manager.dict()
self.playCache = manager.dict()
self.playFrame = manager.Value('i', 1)
self.playIndex = manager.Value('i', 0)
self.playFps = manager.Value('i', 0)
self.playListFps = manager.dict()
self.count = manager.Value('i', 0)
self.run_count = manager.Value('i', 0)
self.run_object = {}
def set(self, playList):
for i in playList:
self.playList.append(i)
self.playList_data.clear()
self.update = True
def set_fps(self, fps):
self.playFps.value = fps
def set_run_count(self, i):
self.run_count.value = i
def run(self):
while True:
if self.playList:
if self.update:
for s, i in enumerate(self.playList):
self.p = Process(target=self.init, args=(s, i, self.count, self.playListFps))
self.p.start()
self.update = False
while True:
if len(self.playList_data.keys()) == len(self.playList):
self.end = True
break
if self.end:
interval = 1 / 24.0
while True:
start_time = time.time()
self.frame.emit()
elapsed_time = time.time() - start_time
wait_time = interval - elapsed_time
if wait_time > 0:
time.sleep(wait_time)
def init(self, index, play, count, fps):
count.value += 1
cap = cv2.VideoCapture(play)
frameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
Fps = cap.get(cv2.CAP_PROP_FPS)
fps[index] = Fps
self.playInfo[index] = frameNum
self.playList_data[index] = [[1, frameNum]]
while True:
try:
self.playList_data[0]
if index == 0:
self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]],
[1, self.playList_data[index][0][1]]]
else:
if index == 1:
frameNum1 = sum([self.playList_data[i][0][1] for i in range(index)]) + 1
self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]],
[frameNum1, frameNum1 + self.playList_data[index][0][1]]]
else:
frameNum1 = sum([self.playList_data[i][0][1] for i in range(index)]) + 1
self.playList_data[index] = [[self.playList_data[index][0][0], self.playList_data[index][0][1]],
[frameNum1+1, frameNum1 + self.playList_data[index][0][1]]]
break
except:
continue
# while True:
# if count.value - index > count.value - run_count.value:
# # print(index, '\n')
# ret, frame = cap.read()
# if ret:
# # print(ret, index, '\n')
# continue
def process_frame(self, run_count):
for i in range(run_count):
p = Process(target=self.run_, args=(i,))
self.run_object[i] = p
p.start()
time.sleep(5)
self.run_object[0].terminate()
self.run_object[0].join()
def run_(self, i):
while True:
print( '\n=========', i, '=========\n')
continue
if __name__ == '__main__':
import sys
import os
app = QApplication([])
playList = []
for s, i in enumerate(os.listdir('/home/jcen/Videos')):
if i.split('.')[-1] in ['rmvb', 'mp4', 'mov']:
playList.append(os.path.join(r'/home/jcen/Videos', i))
s = PlayInit()
s.start()
s.set(playList)
# s.process_frame(1)
while True:
if s.end:
# ret, frame = s.cap[0].read()
# if ret:
# print(ret)
print(s.playList_data.items())
print(s.playInfo.items(), s.count.value, s.playListFps.items())
break
# print(playList)
app.exec_()