@figgis
Last active April 18, 2024 15:55
vqm 2 xlsx
#!/usr/bin/env python
#
# vqm - Run and parse the output of mitsuLinuxMultithread.
# Generate an xlsx file with all the data as well as a summary sheet.
# Tested on Linux only.
#
# USAGE: vqm path
#
# NOTE: YCbCr files need to adhere to the following naming convention:
# NAME_WIDTHxHEIGHT_FPS_...yuv
# Example: BQMall_832x480_60.yuv
#
# NOTE: mitsuLinuxMultithread must reside in the same folder as this python
# program
#
# Copyright (c) 2015 Fredrik Pihl, Fredrik.Pihl@axis.com
# Licensed under The MIT License (MIT)
#
# 24-Sep-2015 Fredrik Pihl Created this.
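#
# NOTE: Based on __run_cmd() below, the external binary appears to be invoked
#       as: ./mitsuLinuxMultithread <yuv-file> <width> <height> <fps> [<numframes>]
#       (this is inferred from the code, not from mitsuLinuxMultithread's own
#       documentation).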
from __future__ import print_function
from StringIO import StringIO
import argparse
import glob
import numpy as np
import operator
import os.path
import pickle
import subprocess
import xlsxwriter

MAPPING = {
    'frame': 0,
    'blockiness': 1,
    'spatialactivity': 2,
    'letterbox': 3,
    'pillarbox': 4,
    'blockloss': 5,
    'blur': 6,
    'temporalact': 7,
    'blackout': 8,
    'freezing': 9,
    'exposure': 10,
    'contrast': 11,
    'brightness': 12,
    'interlace': 13,
    'noise': 14,
    'slice': 15,
    'flickering': 16,
}
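
# MAPPING maps each metric name to a column index; it presumably mirrors the
# column order of mitsuLinuxMultithread's per-frame output. create_xlsx()
# sorts it by index to build the header row, e.g.
# sorted(MAPPING.items(), key=operator.itemgetter(1)) starts with
# [('frame', 0), ('blockiness', 1), ('spatialactivity', 2), ...].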

class VQM(object):
    '''
    VQM Class
    '''
    def __init__(self, path, numframes=None):
        self.path = path
        self.numframes = numframes
        self.files = []
        self.data = []

        self.__get_files()
        self.__gen_data()

        # Uncomment line to generate a simple pickle file with all the data
        # self.__end()

    def __get_files(self):
        '''
        Do globbing to get all *.yuv files in path
        '''
        # Sort the file names to have a nice grouping in the output
        self.files = sorted(glob.glob(os.path.join(self.path, '*.yuv')))

    def __gen_data(self):
        '''
        Generate data structure for each file in self.files
        '''
        for i in self.files:
            p, w, h, f = self.__extract_param(i)
            self.__parse_data(p, self.__run_cmd(i, w, h, f, self.numframes))

    @staticmethod
    def __run_cmd(p, w, h, f, n):
        '''
        Run binary on each file
        p - path
        w - width
        h - height
        f - fps
        n - numframes
        '''
        if n is None:
            cmd = ['./mitsuLinuxMultithread', str(p), str(w), str(h), str(f)]
        else:
            cmd = ['./mitsuLinuxMultithread',
                   str(p), str(w), str(h), str(f), str(n)]

        try:
            print('Parsing {} ...'.format(os.path.basename(p)))
            result = subprocess.check_output(cmd)
        except (OSError, subprocess.CalledProcessError) as err:
            # Fail loudly; returning here would leave 'result' undefined
            print('ERROR: {}'.format(err))
            raise

        return result

    def __parse_data(self, key, result):
        '''
        Parse the output from the binary and store it in self.data
        as a (filename, data-array) tuple
        '''
        # Use the basename as the key
        self.data.append(
            (key, np.genfromtxt(
                # StringIO(result), skip_header=5, skip_footer=1, unpack=True)
                StringIO(result), skip_header=5, skip_footer=1)
             )
        )

    def __debug(self):
        '''
        debug
        '''
        pass

    def __end(self):
        '''
        Use this method to generate a pickle file of the
        parsed data
        '''
        output = open('data.pkl', 'wb')
        # Pickle the data using protocol 0.
        pickle.dump(self.data, output)
        output.close()

        for i in self.data:
            print(i[0])

    @staticmethod
    def __extract_param(path):
        '''
        Extract basename, width, height and fps from filename
        '''
        f = os.path.basename(path)
        fields = f.split('_')
        width, height = map(int, fields[1].split('x'))
        fps = fields[2].split('.')[0]

        return (f, width, height, fps)
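
    # Example (assuming the documented naming convention): for a hypothetical
    # path '/some/path/BQMall_832x480_60.yuv', fields == ['BQMall', '832x480',
    # '60.yuv'], so __extract_param() returns
    # ('BQMall_832x480_60.yuv', 832, 480, '60'); note that fps stays a string.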

    def create_xlsx(self):
        '''
        Generate an xlsx workbook
        '''
        # Create a new Excel file and add a worksheet.
        workbook = xlsxwriter.Workbook('vqm.xlsx')
        header = sorted(MAPPING.items(), key=operator.itemgetter(1))

        # loop over all entries in db and create a worksheet
        # for all entries
        sheets = len(self.data)
        for sheet in range(sheets):
            # Add worksheet
            worksheet = workbook.add_worksheet()

            # Write filename in cell A1
            worksheet.write(0, 0, self.data[sheet][0])

            # Write header
            for col, x in enumerate(header):
                worksheet.write(1, col, x[0])

            lines = len(self.data[sheet][1])
            for row in range(lines):
                data = self.data[sheet][1][row]
                for col, x in enumerate(data):
                    worksheet.write(row+2, col, x)

        # loop over all entries in db and create a summary worksheet
        worksheet = workbook.add_worksheet('summary')

        # Widen the first column to make the text clearer.
        width = max([len(f[0]) for f in self.data])
        worksheet.set_column('A:A', width)

        # Write header
        for col, x in enumerate(header):
            worksheet.write(0, col+1, x[0])

        # Write summary data, i.e. average value of all columns
        # for each sheet
        sheets = len(self.data)
        for sheet in range(sheets):
            # Write filename in column A
            worksheet.write(sheet+1, 0, self.data[sheet][0])

            # Add averaging formulas; per-file data starts on Excel row 3
            # (row 1 holds the filename, row 2 the header)
            startrow = 3
            lines = len(self.data[sheet][1])
            for col, x in enumerate(header):
                c = chr(ord('A')+col)
                formula = "=AVERAGE(Sheet{}{}{}{}:{}{})".format(
                    sheet+1, '!', c, startrow, c, startrow+lines-1)
                worksheet.write_formula(sheet+1, col+1, formula)

            # Write number of frames for each sequence
            # (frame numbering starts at 0, hence the +1)
            formula = "=MAX(Sheet{}{}{}{}:{}{})+1".format(
                sheet+1, '!', 'A', startrow, 'A', startrow+lines-1)
            worksheet.write_formula(sheet+1, 1, formula)

        workbook.close()


def main():
    '''
    Main function
    '''
    parser = argparse.ArgumentParser(
        description='Extract metrics from all YCbCr files in folder',
        epilog="Example: ./vqm.py ~/yuvfolder")

    parser.add_argument(
        'path',
        type=str,
        help='Source file path, YCbCr 4:2:0')

    parser.add_argument(
        '-n',
        '--numframes',
        type=int,
        help='Number of frames to analyze')

    args = parser.parse_args()

    vqm = VQM(**vars(args))
    vqm.create_xlsx()


if __name__ == "__main__":
    main()

@jainxy commented Aug 14, 2020

@line#91 - shouldn't it be "f - fps"?

@figgis (Author) commented Aug 28, 2020

Probably. Years since I looked at this code :-)

@gitunit commented Jun 8, 2022

Any idea if the binary file is open source somewhere?

@figgis (Author) commented Jun 8, 2022

@gitunit commented Jun 8, 2022

I'm aware of this page, but there is only the deprecated MATLAB version, or am I missing something?

@figgis (Author) commented Jun 9, 2022

There are binaries for Linux, Windows, etc. available from that page. The source code is not available, but all the papers that the implementation is based on are listed as well.
