1. The demo file displays the score once the algorithm finishes computing:
import torch
from torchvision import transforms
import skvideo.io
from PIL import Image
import numpy as np
from VSFA import VSFA
from CNNfeatures import get_features
from argparse import ArgumentParser
import time
import gc
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
from pylab import *

gc.collect()
torch.cuda.empty_cache()  # empty_cache is a function and must be called
# NOTE: `model` is not defined yet at this point, so the original DataParallel call
# raises a NameError; wrap the model only after it has been created, e.g.:
# if torch.cuda.device_count() > 1:
#     model = torch.nn.DataParallel(model, device_ids=[1, 2, 3, 4, 5, 6, 7, 8])
if __name__ == '__main__':
    parser = ArgumentParser(description='Test Demo of VSFA')
    parser.add_argument('--model_path', default='models/VSFA-KoNViD-1k-EXP2', type=str,
                        help='model path (default: models/VSFA-KoNViD-1k-EXP2)')
    parser.add_argument('--video_path', default='./test.mp4', type=str,
                        help='video path (default: ./test.mp4)')
    parser.add_argument('--video_format', default='RGB', type=str,
                        help='video format: RGB or YUV420 (default: RGB)')
    parser.add_argument('--video_width', type=int, default=None,
                        help='video width')
    parser.add_argument('--video_height', type=int, default=None,
                        help='video height')
    parser.add_argument('--frame_batch_size', type=int, default=32,
                        help='frame batch size for feature extraction (default: 32)')
    args = parser.parse_args(args=[])  # args=[] ignores the command line and uses the defaults

    device = torch.device("cpu")
    start = time.time()

    # data preparation
    assert args.video_format == 'YUV420' or args.video_format == 'RGB'
    if args.video_format == 'YUV420':
        video_data = skvideo.io.vread(args.video_path, args.video_height, args.video_width,
                                      inputdict={'-pix_fmt': 'yuvj420p'})
    else:
        video_data = skvideo.io.vread(args.video_path)
    video_length = video_data.shape[0]
    video_channel = video_data.shape[3]
    video_height = video_data.shape[1]
    video_width = video_data.shape[2]
    transformed_video = torch.zeros([video_length, video_channel, video_height, video_width])
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    for frame_idx in range(video_length):
        frame = video_data[frame_idx]
        frame = Image.fromarray(frame)
        frame = transform(frame)
        transformed_video[frame_idx] = frame
    print('Video length: {}'.format(transformed_video.shape[0]))

    # feature extraction
    features = get_features(transformed_video, frame_batch_size=args.frame_batch_size, device=device)
    features = torch.unsqueeze(features, 0)  # batch size 1

    # quality prediction using VSFA
    model = VSFA()
    model.load_state_dict(torch.load(args.model_path, map_location=device))  # map_location keeps loading on CPU
    model.to(device)
    model.eval()
    with torch.no_grad():
        input_length = features.shape[1] * torch.ones(1, 1)
        outputs = model(features, input_length)
        y_pred = outputs[0][0].to('cpu').numpy()
        print("Predicted quality: {}".format(y_pred))

    end = time.time()
    print('Time: {} s'.format(end - start))
2. The main file contains a video-playback method and the UI-building code:
import numpy as np
import tkinter as tk
import os
import skvideo.io
from PIL import Image, ImageTk
import cv2
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from tkinter import filedialog
from torchvision import transforms
from VSFA import VSFA
from CNNfeatures import get_features
from pylab import *
matplotlib.use('TkAgg')
from demo import *


def open_file():
    videoPath = filedialog.askopenfilename()
    video = cv2.VideoCapture(videoPath)
    waitTime = 1000 / video.get(cv2.CAP_PROP_FPS)  # per-frame delay in ms
    videoTime = video.get(cv2.CAP_PROP_FRAME_COUNT) / video.get(cv2.CAP_PROP_FPS)  # total duration in s
    while video.isOpened():
        ret, readyFrame = video.read()
        if ret:
            videoFrame = cv2.cvtColor(readyFrame, cv2.COLOR_BGR2RGBA)
            newImage = Image.fromarray(videoFrame).resize((520, 320))
            newCover = ImageTk.PhotoImage(image=newImage)
            videoLable.configure(image=newCover)
            videoLable.image = newCover
            videoLable.update()
            cv2.waitKey(int(waitTime))
        else:
            break


window = tk.Tk()
window.title('视频质量评估系统')
window.geometry('1080x720')
# video display area
videoLable = tk.Label(window, width=520, height=320, bd=0)
videoLable.place(x=50, y=200)
# score display area
scoreLable = tk.Label(window, width=520, height=320, bd=0)
scoreLable.place(x=150, y=200)
# title text
l1 = tk.Label(window, text='视频质量评估系统', bg='white', font=('Arial', 20), width=15, height=2)
l1.place(x=400, y=50)
# button setup
b1 = tk.Button(window, text='打开视频', width=15, height=2, command=open_file)
b1.place(x=200, y=620)

os.system('python demo.py')  # runs demo.py in a separate process; its printout only reaches the console, not the window
window.mainloop()
The requirement now is to show the three values printed by demo on the interface. Any help would be greatly appreciated.
I suggest rewriting demo.py into a callable form and importing it in main, so that the three values to be shown in the window can be retrieved directly. Add a button on the main window that triggers the computation function, then add labels (or a text box) to display the data. Example:
# demo.py
import time


def comp():
    start = time.time()
    x = f"result1:{sum(i**5 - 3*i*3 - 20 for i in range(10000))}"
    y = f"result2:{sum(i**1/2 + i%3 for i in range(10000))}"
    end = time.time()
    z = f"time:{end - start}"
    return x, y, z
# main.py
import tkinter as tk
from demo import comp

window = tk.Tk()
window.title('视频质量评估系统')
window.geometry('1080x720')
# video display area
videoLable = tk.Label(window, width=520, height=320, bd=0)
videoLable.place(x=50, y=200)
# score display area
scoreLable = tk.Label(window, width=520, height=320, bd=0)
scoreLable.place(x=150, y=200)
# title text
l1 = tk.Label(window, text='视频质量评估系统', bg='white',
              font=('Arial', 20), width=15, height=2)
l1.place(x=400, y=50)
# button setup
b1 = tk.Button(window, text='打开视频', width=15, height=2)  # command=open_file
b1.place(x=100, y=620)
# os.system('python demo.py')


def get_comp():
    x, y, z = comp()
    c1 = tk.Label(window, text=x, font=('simhei', 10), fg='blue', width=50, height=2)
    c1.place(x=320, y=620)
    c2 = tk.Label(window, text=y, font=('simhei', 10, 'italic'), fg='green', width=50, height=2)
    c2.place(x=520, y=620)
    c3 = tk.Label(window, text=z, font=('simhei', 10, 'underline'), fg='red', width=50, height=2)
    c3.place(x=720, y=620)


b2 = tk.Button(window, text='测试数据', width=15, height=2, command=get_comp)
b2.place(x=300, y=620)
window.mainloop()
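To apply the same pattern to the actual pipeline, the body of demo.py's __main__ block can be moved into a function that takes a video path and returns the three strings that were previously printed, and the main window can call it after a video is selected. The following is only a minimal sketch, assuming VSFA, get_features and the models/VSFA-KoNViD-1k-EXP2 weights behave exactly as in the demo code above; the names compute_score and evaluate_video and the extra button b3 are illustrative, not part of the original code.

# demo.py rewritten as a callable module (sketch)
import time
import torch
import skvideo.io
from PIL import Image
from torchvision import transforms
from VSFA import VSFA
from CNNfeatures import get_features


def compute_score(video_path, model_path='models/VSFA-KoNViD-1k-EXP2', frame_batch_size=32):
    """Run the VSFA pipeline on one video and return the three result strings."""
    device = torch.device("cpu")
    start = time.time()
    # read and normalize every frame, as in the original demo (RGB input assumed)
    video_data = skvideo.io.vread(video_path)
    video_length, video_height, video_width, video_channel = video_data.shape
    transformed_video = torch.zeros([video_length, video_channel, video_height, video_width])
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    for frame_idx in range(video_length):
        transformed_video[frame_idx] = transform(Image.fromarray(video_data[frame_idx]))
    # feature extraction and quality prediction
    features = get_features(transformed_video, frame_batch_size=frame_batch_size, device=device)
    features = torch.unsqueeze(features, 0)  # batch size 1
    model = VSFA()
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()
    with torch.no_grad():
        input_length = features.shape[1] * torch.ones(1, 1)
        outputs = model(features, input_length)
        y_pred = outputs[0][0].to('cpu').numpy()
    end = time.time()
    return ('Video length: {}'.format(video_length),
            'Predicted quality: {}'.format(y_pred),
            'Time: {} s'.format(end - start))


# main.py (sketch): one more button that runs the computation and puts the three
# strings into the existing scoreLable; button text and position are illustrative
from tkinter import filedialog
from demo import compute_score


def evaluate_video():
    video_path = filedialog.askopenfilename()
    length_text, quality_text, time_text = compute_score(video_path)
    scoreLable.configure(text='\n'.join([length_text, quality_text, time_text]))


b3 = tk.Button(window, text='评估质量', width=15, height=2, command=evaluate_video)
b3.place(x=500, y=620)

Note that compute_score runs on the Tk main thread, so the window will be unresponsive until feature extraction finishes; running it in a threading.Thread and updating the label when it returns avoids the freeze.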