README.md

This is a reproducer of a btrfs -o discard problem.

Wed 26 Aug 2015 09:16:38 PM CDT

Please check the git log of this gist for updates.

To run:

$ sudo python main.py

This will do the following (a rough shell equivalent is shown below):

  1. create a loop device backed by a disk image (originally /mnt/tmpfs/disk.img; the current main.py uses /home/jun/tmp/disk.img so that filefrag can be run on the backing file)
  2. make btrfs on the loop device
  3. mount btrfs with -o discard,ssd,autodefrag at /mnt/fsonloop
  4. overwrite and fsync a 4 MB file 50 times
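
For reference, running the script is roughly equivalent to (paths taken from the current main.py):

$ truncate -s 268435456 /home/jun/tmp/disk.img
$ sudo losetup /dev/loop0 /home/jun/tmp/disk.img
$ sudo mkfs.btrfs /dev/loop0
$ sudo mount -o discard,ssd,autodefrag /dev/loop0 /mnt/fsonloop

followed by overwriting and fsyncing the 4 MB file /mnt/fsonloop/test.file 50 times.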

To run blktrace with it:

Go to main.py and uncomment the blktracer lines in main(); they are shown in the snippet below.
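
Concretely, the blktracer lines look like this once uncommented (copied from main.py):

blktracer = blocktrace.BlockTraceManager(
    dev = loop_path,
    resultpath = "./blkparse.output.txt",
    to_ftlsim_path = "./events.txt",  # operation offset size (in bytes)
    sector_size = 512)
# ... loopdev.create(), fs.make(), fs.mount(...), sync ...
blktracer.start_tracing_and_collecting()
run_workload(btrfs_mount_point)
blktracer.blkparse_file_to_ftlsim_input_file()
blktracer.stop_tracing_and_collecting()

You will probably also want to call stop_tracing_and_collecting() before blkparse_file_to_ftlsim_input_file(), so blkparse has finished writing its output before it is parsed.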

blocktrace.py

import os
import re
import subprocess
import time

import utils


class BlockTraceManager(object):
    "This class provides interfaces to interact with blktrace"
    def __init__(self, dev, resultpath, to_ftlsim_path, sector_size):
        self.dev = dev
        self.resultpath = resultpath
        self.to_ftlsim_path = to_ftlsim_path
        self.sector_size = sector_size

    def start_tracing_and_collecting(self):
        self.proc = start_blktrace_on_bg(self.dev, self.resultpath)

    def stop_tracing_and_collecting(self):
        "this is not elegant... TODO: improve"
        stop_blktrace_on_bg()

    def blkparse_file_to_ftlsim_input_file(self):
        table = parse_blkparse_to_table(open(self.resultpath, 'r'))
        utils.prepare_dir_for_path(self.to_ftlsim_path)
        finaltable_to_ftlsim_input(table, self.to_ftlsim_path,
                                   self.sector_size)


def start_blktrace_on_bg(dev, resultpath):
    utils.prepare_dir_for_path(resultpath)
    # cmd = "sudo blktrace -a write -a read -d {dev} -o - | blkparse -i - > "\
    cmd = "sudo blktrace -a queue -d {dev} -o - | blkparse -i - > "\
          "{resultpath}".format(dev = dev, resultpath = resultpath)
    print cmd

    p = subprocess.Popen(cmd, shell=True)
    time.sleep(0.3)  # wait to see if there's any immediate error
    if p.poll() != None:
        raise RuntimeError("tracing failed to start")
    return p


def stop_blktrace_on_bg():
    utils.shcmd('pkill blkparse', ignore_error=True)
    utils.shcmd('pkill blktrace', ignore_error=True)
    utils.shcmd('sync')

    # try:
    #     proc.terminate()
    # except Exception, e:
    #     print e
    #     exit(1)


def is_data_line(line):
    # a data line looks like: "devid ... blockstart + nblocks ..."
    match_obj = re.match(r'\d+,\d+.*\d+\s+\+\s+\d+', line)
    if match_obj == None:
        return False
    else:
        return True


def parse_blkparse_to_table(line_iter):
    def line2dic(line):
        "is_data_line() must be true for this line, e.g. "\
        "['8,0', '0', '1', '0.000000000', '440', 'A', 'W', '12912077', '+', '8', '<-', '(8,2)', '606224']"
        names = ['devid', 'cpuid', 'seqid', 'time', 'pid', 'action', 'RWBS', 'blockstart', 'ignore1', 'size']
        #            0        1        2       3      4       5        6         7            8        9
        items = line.split()
        assert len(items) >= len(names)
        dic = dict(zip(names, items))
        return dic

    table = []
    for line in line_iter:
        line = line.strip()
        # print is_data_line(line), line
        if not is_data_line(line):
            continue
        ret = line2dic(line)
        if ret != None:
            table.append(ret)
    return table


########################################################
# table = [
#     {'col1':data, 'col2':data, ..},
#     {'col1':data, 'col2':data, ..},
#     ...
# ]
def table_to_file(table, filepath, adddic=None):
    'save table to a file with additional columns'
    utils.prepare_dir_for_path(filepath)
    with open(filepath, 'w') as f:
        colnames = table[0].keys()
        if adddic != None:
            colnames += adddic.keys()
        colnamestr = ';'.join(colnames) + '\n'
        f.write(colnamestr)

        for row in table:
            if adddic != None:
                rowcopy = dict(row.items() + adddic.items())
            else:
                rowcopy = row
            rowstr = [rowcopy[k] for k in colnames]
            rowstr = [str(x) for x in rowstr]
            rowstr = ';'.join(rowstr) + '\n'
            f.write(rowstr)


def blkparse_to_parsed_files(blkparse_path):
    # NOTE: unused helper; it relies on a `conf` object that is not defined in
    # this gist and does not pass a sector size to finaltable_to_ftlsim_input().
    table = parse_blkparse_to_table(open(blkparse_path, 'r'))
    # table_to_file(table, table_path)
    table_path = conf.get_ftlsim_events_output_path()
    utils.prepare_dir_for_path(table_path)
    finaltable_to_ftlsim_input(table, table_path)


def finaltable_to_ftlsim_input(table, out_path, sector_size):
    print '-----------------writing to out_path', out_path
    utils.prepare_dir_for_path(out_path)
    out = open(out_path, 'w')
    for row in table:
        blk_start = int(row['blockstart'])
        size = int(row['size'])

        byte_offset = blk_start * sector_size
        byte_size = size * sector_size

        # the RWBS field may carry extra flags (e.g. 'DS'), so match by membership
        if 'D' in row['RWBS']:
            operation = 'discard'
        elif 'W' in row['RWBS']:
            operation = 'write'
        elif 'R' in row['RWBS']:
            operation = 'read'
        else:
            raise RuntimeError('unknown operation')

        items = [str(x) for x in [operation, byte_offset, byte_size]]
        line = ' '.join(items) + '\n'
        out.write(line)

    out.flush()
    os.fsync(out)
    out.close()
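
Each line of the resulting events file has the form "operation byte_offset byte_size", where operation is read, write, or discard and the offsets/sizes are blkparse sector numbers multiplied by sector_size. A minimal sketch of consuming it, assuming the file name from main.py's commented-out blktracer setup:

# count the traced operations in the events file
counts = {}
with open('./events.txt') as f:
    for line in f:
        op, byte_offset, byte_size = line.split()
        counts[op] = counts.get(op, 0) + 1
print counts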

btrfs-debug-tree output for the loop device:

root tree
leaf 4288512 items 10 free space 885 generation 10 owner 1
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (EXTENT_TREE ROOT_ITEM 0) itemoff 3556 itemsize 439
root data bytenr 4280320 level 0 dirid 0 refs 1 gen 10
uuid 00000000-0000-0000-0000-000000000000
item 1 key (DEV_TREE ROOT_ITEM 0) itemoff 3117 itemsize 439
root data bytenr 9236480 level 0 dirid 0 refs 1 gen 9
uuid 00000000-0000-0000-0000-000000000000
item 2 key (FS_TREE INODE_REF 6) itemoff 3100 itemsize 17
inode ref index 0 namelen 7 name: default
item 3 key (FS_TREE ROOT_ITEM 0) itemoff 2661 itemsize 439
root data bytenr 4206592 level 0 dirid 256 refs 1 gen 10
uuid 00000000-0000-0000-0000-000000000000
ctransid 10 otransid 0 stransid 0 rtransid 0
item 4 key (ROOT_TREE_DIR INODE_ITEM 0) itemoff 2501 itemsize 160
inode generation 3 transid 0 size 0 block group 0 mode 40755 links 1
item 5 key (ROOT_TREE_DIR INODE_REF 6) itemoff 2489 itemsize 12
inode ref index 0 namelen 2 name: ..
item 6 key (ROOT_TREE_DIR DIR_ITEM 2378154706) itemoff 2452 itemsize 37
location key (FS_TREE ROOT_ITEM -1) type DIR
namelen 7 datalen 0 name: default
item 7 key (CSUM_TREE ROOT_ITEM 0) itemoff 2013 itemsize 439
root data bytenr 4284416 level 1 dirid 0 refs 1 gen 10
uuid 00000000-0000-0000-0000-000000000000
item 8 key (UUID_TREE ROOT_ITEM 0) itemoff 1574 itemsize 439
root data bytenr 4198400 level 0 dirid 0 refs 1 gen 5
uuid 0f776680-9d60-3c4b-8d63-f02556f65f9f
item 9 key (DATA_RELOC_TREE ROOT_ITEM 0) itemoff 1135 itemsize 439
root data bytenr 4231168 level 0 dirid 256 refs 1 gen 4
uuid 00000000-0000-0000-0000-000000000000
chunk tree
leaf 131072 items 7 free space 3242 generation 9 owner 3
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (DEV_ITEMS DEV_ITEM 1) itemoff 3897 itemsize 98
dev item devid 1 total_bytes 268435456 bytes used 146800640
item 1 key (FIRST_CHUNK_TREE CHUNK_ITEM 0) itemoff 3817 itemsize 80
chunk length 4194304 owner 2 type 2 num_stripes 1
stripe 0 devid 1 offset 0
item 2 key (FIRST_CHUNK_TREE CHUNK_ITEM 4194304) itemoff 3737 itemsize 80
chunk length 8388608 owner 2 type 5 num_stripes 1
stripe 0 devid 1 offset 4194304
item 3 key (FIRST_CHUNK_TREE CHUNK_ITEM 12582912) itemoff 3657 itemsize 80
chunk length 33554432 owner 2 type 5 num_stripes 1
stripe 0 devid 1 offset 12582912
item 4 key (FIRST_CHUNK_TREE CHUNK_ITEM 46137344) itemoff 3577 itemsize 80
chunk length 33554432 owner 2 type 5 num_stripes 1
stripe 0 devid 1 offset 46137344
item 5 key (FIRST_CHUNK_TREE CHUNK_ITEM 79691776) itemoff 3497 itemsize 80
chunk length 33554432 owner 2 type 5 num_stripes 1
stripe 0 devid 1 offset 79691776
item 6 key (FIRST_CHUNK_TREE CHUNK_ITEM 113246208) itemoff 3417 itemsize 80
chunk length 33554432 owner 2 type 5 num_stripes 1
stripe 0 devid 1 offset 113246208
extent tree key (EXTENT_TREE ROOT_ITEM 0)
leaf 4280320 items 17 free space 2863 generation 10 owner 2
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (0 BLOCK_GROUP_ITEM 4194304) itemoff 3971 itemsize 24
block group used 4096 chunk_objectid 256 flags 2
item 1 key (131072 EXTENT_ITEM 4096) itemoff 3920 itemsize 51
extent refs 1 gen 9 flags 2
tree block key (DEV_ITEMS DEV_ITEM 1) level 0
tree block backref root 3
item 2 key (4194304 BLOCK_GROUP_ITEM 8388608) itemoff 3896 itemsize 24
block group used 36864 chunk_objectid 256 flags 5
item 3 key (4198400 EXTENT_ITEM 4096) itemoff 3845 itemsize 51
extent refs 1 gen 5 flags 2
tree block key (0 UNKNOWN.0 0) level 0
tree block backref root 9
item 4 key (4206592 EXTENT_ITEM 4096) itemoff 3794 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (256 INODE_ITEM 0) level 0
tree block backref root 5
item 5 key (4231168 EXTENT_ITEM 4096) itemoff 3743 itemsize 51
extent refs 1 gen 4 flags 2
tree block key (256 INODE_ITEM 0) level 0
tree block backref root 18446744073709551607
item 6 key (4251648 EXTENT_ITEM 4096) itemoff 3692 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (EXTENT_CSUM EXTENT_CSUM 125452288) level 0
tree block backref root 7
item 7 key (4280320 EXTENT_ITEM 4096) itemoff 3641 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (0 BLOCK_GROUP_ITEM 4194304) level 0
tree block backref root 2
item 8 key (4284416 EXTENT_ITEM 4096) itemoff 3590 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (EXTENT_CSUM EXTENT_CSUM 113246208) level 1
tree block backref root 7
item 9 key (4288512 EXTENT_ITEM 4096) itemoff 3539 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (EXTENT_TREE ROOT_ITEM 0) level 0
tree block backref root 1
item 10 key (4292608 EXTENT_ITEM 4096) itemoff 3488 itemsize 51
extent refs 1 gen 10 flags 2
tree block key (EXTENT_CSUM EXTENT_CSUM 121417728) level 0
tree block backref root 7
item 11 key (9236480 EXTENT_ITEM 4096) itemoff 3437 itemsize 51
extent refs 1 gen 9 flags 2
tree block key (0 DEV_STATS_ITEM 1) level 0
tree block backref root 4
item 12 key (12582912 BLOCK_GROUP_ITEM 33554432) itemoff 3413 itemsize 24
block group used 0 chunk_objectid 256 flags 5
item 13 key (46137344 BLOCK_GROUP_ITEM 33554432) itemoff 3389 itemsize 24
block group used 0 chunk_objectid 256 flags 5
item 14 key (79691776 BLOCK_GROUP_ITEM 33554432) itemoff 3365 itemsize 24
block group used 0 chunk_objectid 256 flags 5
item 15 key (113246208 BLOCK_GROUP_ITEM 33554432) itemoff 3341 itemsize 24
block group used 4194304 chunk_objectid 256 flags 5
item 16 key (121634816 EXTENT_ITEM 4194304) itemoff 3288 itemsize 53
extent refs 1 gen 10 flags 1
extent data backref root 5 objectid 257 offset 0 count 1
device tree key (DEV_TREE ROOT_ITEM 0)
leaf 9236480 items 7 free space 3492 generation 9 owner 4
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (0 DEV_STATS_ITEM 1) itemoff 3955 itemsize 40
device stats
item 1 key (1 DEV_EXTENT 0) itemoff 3907 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 0 length 4194304
item 2 key (1 DEV_EXTENT 4194304) itemoff 3859 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 4194304 length 8388608
item 3 key (1 DEV_EXTENT 12582912) itemoff 3811 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 12582912 length 33554432
item 4 key (1 DEV_EXTENT 46137344) itemoff 3763 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 46137344 length 33554432
item 5 key (1 DEV_EXTENT 79691776) itemoff 3715 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 79691776 length 33554432
item 6 key (1 DEV_EXTENT 113246208) itemoff 3667 itemsize 48
dev extent chunk_tree 3
chunk objectid 256 chunk offset 113246208 length 33554432
fs tree key (FS_TREE ROOT_ITEM 0)
leaf 4206592 items 7 free space 3338 generation 10 owner 5
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (256 INODE_ITEM 0) itemoff 3835 itemsize 160
inode generation 3 transid 6 size 18 block group 0 mode 40755 links 1
item 1 key (256 INODE_REF 256) itemoff 3823 itemsize 12
inode ref index 0 namelen 2 name: ..
item 2 key (256 DIR_ITEM 2412072806) itemoff 3784 itemsize 39
location key (257 INODE_ITEM 0) type FILE
namelen 9 datalen 0 name: test.file
item 3 key (256 DIR_INDEX 2) itemoff 3745 itemsize 39
location key (257 INODE_ITEM 0) type FILE
namelen 9 datalen 0 name: test.file
item 4 key (257 INODE_ITEM 0) itemoff 3585 itemsize 160
inode generation 6 transid 10 size 4194304 block group 0 mode 100644 links 1
item 5 key (257 INODE_REF 256) itemoff 3566 itemsize 19
inode ref index 2 namelen 9 name: test.file
item 6 key (257 EXTENT_DATA 0) itemoff 3513 itemsize 53
extent data disk byte 121634816 nr 4194304
extent data offset 0 nr 4194304 ram 4194304
extent compression 0
checksum tree key (CSUM_TREE ROOT_ITEM 0)
node 4284416 level 1 items 2 free 119 generation 10 owner 7
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
key (EXTENT_CSUM EXTENT_CSUM 121634816) block 4292608 (1048) gen 10
key (EXTENT_CSUM EXTENT_CSUM 125452288) block 4251648 (1038) gen 10
leaf 4292608 items 1 free space 242 generation 10 owner 7
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (EXTENT_CSUM EXTENT_CSUM 121634816) itemoff 267 itemsize 3728
extent csum item
leaf 4251648 items 1 free space 3602 generation 10 owner 7
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (EXTENT_CSUM EXTENT_CSUM 125452288) itemoff 3627 itemsize 368
extent csum item
uuid tree key (UUID_TREE ROOT_ITEM 0)
leaf 4198400 items 0 free space 3995 generation 5 owner 9
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
data reloc tree key (DATA_RELOC_TREE ROOT_ITEM 0)
leaf 4231168 items 2 free space 3773 generation 4 owner 18446744073709551607
fs uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
chunk uuid c4af716a-65d8-459c-903a-10896741ba86
item 0 key (256 INODE_ITEM 0) itemoff 3835 itemsize 160
inode generation 3 transid 0 size 0 block group 0 mode 40755 links 1
item 1 key (256 INODE_REF 256) itemoff 3823 itemsize 12
inode ref index 0 namelen 2 name: ..
total bytes 268435456
bytes used 4235264
uuid 0328595d-e2e0-4217-a28a-a61acfe06c0e
Btrfs v3.12
jun@jun-VirtualBox:~/workdir/b6ce39eeb6de8887e66a$ filefrag -b4096 -v ~/tmp/disk.img
Filesystem type is: ef53
File size of /home/jun/tmp/disk.img is 268435456 (65536 blocks of 4096 bytes)
ext: logical_offset: physical_offset: length: expected: flags:
0: 0.. 32: 94208.. 94240: 33:
1: 1025.. 1025: 95233.. 95233: 1: 94241:
2: 1027.. 1027: 76153.. 76153: 1: 95234:
3: 1033.. 1033: 95241.. 95241: 1: 76154:
4: 1038.. 1038: 76164.. 76164: 1: 95242:
5: 1045.. 1048: 76171.. 76174: 4: 76165:
6: 2255.. 2255: 929999.. 929999: 1: 76175:
7: 16384.. 16384: 110592.. 110592: 1: 930000:
8: 29696.. 30719: 945152.. 946175: 1024: 110593:
/home/jun/tmp/disk.img: 9 extents found
jun@jun-VirtualBox:~/workdir/b6ce39eeb6de8887e66a$ filefrag -b4096 -v ~/tmp/disk.img
Filesystem type is: ef53
File size of /home/jun/tmp/disk.img is 268435456 (65536 blocks of 4096 bytes)
ext: logical_offset: physical_offset: length: expected: flags:
0: 0.. 32: 94208.. 94240: 33:
1: 34.. 511: 94242.. 94719: 478: 94241:
2: 1025.. 1025: 95233.. 95233: 1: 94720:
3: 1027.. 1027: 76153.. 76153: 1: 95234:
4: 1033.. 1033: 95241.. 95241: 1: 76154:
5: 1038.. 1038: 76164.. 76164: 1: 95242:
6: 1045.. 1048: 76171.. 76174: 4: 76165:
7: 2255.. 2255: 929999.. 929999: 1: 76175:
8: 11264.. 16383: 931840.. 936959: 5120: 930000:
9: 16384.. 16384: 110592.. 110592: 1: 936960:
10: 29696.. 30719: 945152.. 946175: 1024: 110593:
11: 65024.. 65535: 359936.. 360447: 512: 946176: eof
/home/jun/tmp/disk.img: 12 extents found

filesystem.py

import abc
import os

import fshelper
import utils


class LoopDevice(object):
    def __init__(self, dev_path, image_folder, size_mb, img_file=None):
        self.dev_path = dev_path
        self.image_folder = image_folder
        self.size_mb = size_mb
        self.img_file = img_file

    def create(self):
        fshelper.make_loop_device(self.dev_path, self.image_folder,
                                  self.size_mb, self.img_file)

    def delete(self):
        fshelper.delLoopDev(self.dev_path)


class FileSystemBase(object):
    __metaclass__ = abc.ABCMeta

    def __init__(self, device, mount_point):
        self.dev = device
        self.mount_point = mount_point

    @abc.abstractmethod
    def make(self):
        "will never be here"
        raise NotImplementedError

    def mount(self, opt_list=None):
        if opt_list == None or len(opt_list) == 0:
            opt_str = ''
        else:
            opt_str = '-o ' + ','.join(opt_list)

        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point)

        ret = utils.shcmd('mount {opt} {dev} {mp}'.format(
            opt = opt_str, dev = self.dev, mp = self.mount_point),
            ignore_error = True)
        if ret != 0:
            raise RuntimeError("Failed to mount dev:{} to dir:{}".format(
                self.dev, self.mount_point))

    def umount(self):
        ret = fshelper.umountFS(self.mount_point)
        if ret != 0:
            raise RuntimeError("Failed to umount {}".format(self.mount_point))

    def sync(self):
        utils.shcmd("sync")


class Btrfs(FileSystemBase):
    def make(self, opt_dic=None):
        if opt_dic == None:
            opt_str = ''
        else:
            items = [' '.join([k, v]) for k, v in opt_dic.items()]
            opt_str = ' '.join(items)

        ret = utils.shcmd('mkfs.btrfs {opt} {dev}'.format(
            opt=opt_str, dev = self.dev), ignore_error = True)
        if ret != 0:
            raise RuntimeError("Failed to make dev:{}".format(self.dev))
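
Note that Btrfs.make() also accepts an opt_dic of mkfs.btrfs flags (main.py calls it with none). A hypothetical usage sketch; the -n 4096 nodesize flag is only an illustration:

fs = Btrfs(device='/dev/loop0', mount_point='/mnt/fsonloop')
fs.make(opt_dic={'-n': '4096'})   # runs: mkfs.btrfs -n 4096 /dev/loop0
fs.mount(opt_list=['discard', 'ssd', 'autodefrag'])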

fshelper.py

#!/usr/bin/env python
import json
import os
import re
import subprocess

import utils


def umountFS(mountpoint):
    cmd = ["umount", mountpoint]
    p = subprocess.Popen(cmd)
    p.wait()
    return p.returncode


def mkLoopDevOnFile(devname, filepath):
    cmd = ['losetup', devname, filepath]
    cmd = [str(x) for x in cmd]
    print " ".join(cmd), "......"
    proc = subprocess.Popen(cmd)
    proc.wait()
    return proc.returncode


def delLoopDev(devname):
    cmd = ['losetup', '-d', devname]
    cmd = [str(x) for x in cmd]
    print " ".join(cmd), "......"
    proc = subprocess.Popen(cmd)
    proc.wait()
    return proc.returncode


def isMounted(name):
    "only check whether a name appears in the mounted list"
    name = name.rstrip('/')
    print "isMounted: name:", name
    with open('/etc/mtab', 'r') as f:
        for line in f:
            # pad the line so the regex can anchor on whitespace at both ends
            line = " " + line + " "
            if re.search(r'\s' + name + r'\s', line):
                return True
    return False


def isLoopDevUsed(path):
    # `losetup -f` prints the first unused loop device; if that is "greater"
    # than the requested path, the requested device is already in use.
    cmd = ['losetup', '-f']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    proc.wait()

    outstr = proc.communicate()[0]
    outstr = outstr.strip()
    if outstr > path:
        return True
    else:
        return False


def make_loop_device(devname, image_folder, sizeMB, img_file=None):
    "size is in MB. The tmpfs for this device might be bigger than sizeMB"
    if not devname.startswith('/dev/loop'):
        raise RuntimeError(
            'you are requesting to create a loop device on a non-loop device path')

    if not os.path.exists(image_folder):
        os.makedirs(image_folder)

    # umount the FS mounted on the loop dev
    if isMounted(devname):
        if umountFS(devname) != 0:
            raise RuntimeError("unable to umount {}".format(devname))
        else:
            print devname, 'umounted'
    else:
        print devname, "is not mounted"

    # delete the loop device
    if isLoopDevUsed(devname):
        if delLoopDev(devname) != 0:
            raise RuntimeError("Failed to delete loop device")
        else:
            print devname, 'is deleted'
    else:
        print devname, "is not in use"

    imgpath = os.path.join(image_folder, "disk.img")
    utils.prepare_dir_for_path(imgpath)
    if img_file == None:
        mkImageFile(imgpath, sizeMB)
    else:
        cmd = ['cp', img_file, imgpath]
        subprocess.call(cmd)

    ret = mkLoopDevOnFile(devname, imgpath)
    if ret != 0:
        raise RuntimeError("Failed at losetup")


def mkImageFile(filepath, size):
    "size is in MB"
    if os.path.exists(filepath):
        utils.shcmd("rm -f {}".format(filepath))
    cmd = ['truncate', '-s', str(size * 1024 * 1024), filepath]
    print " ".join(cmd), "......"
    proc = subprocess.Popen(cmd)
    proc.wait()
    return proc.returncode


def mountTmpfs(mountpoint, size):
    if not os.path.exists(mountpoint):
        os.makedirs(mountpoint)
    cmd = ['mount', '-t', 'tmpfs',
           '-o', 'size=' + str(size), 'tmpfs', mountpoint]
    cmd = [str(x) for x in cmd]
    print " ".join(cmd), "......"
    proc = subprocess.Popen(cmd)
    proc.wait()
    return proc.returncode
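
Note that mountTmpfs() is not called anywhere in main.py. If you want the backing image on tmpfs (as the README's step 1 originally described), a minimal sketch, with an arbitrary 1 GB tmpfs size:

import fshelper

fshelper.mountTmpfs('/mnt/tmpfs', 1024 * 1024 * 1024)       # 1 GB tmpfs at /mnt/tmpfs
fshelper.make_loop_device('/dev/loop0', '/mnt/tmpfs', 256)  # creates /mnt/tmpfs/disk.img and losetups it

You would then point image_folder in main.py at /mnt/tmpfs.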

main.py

#!/usr/bin/env python
import subprocess
import os
import sys
import shlex
import time

import utils, filesystem, fshelper, blocktrace


def shcmd(cmd, ignore_error=False):
    print 'Doing:', cmd
    ret = subprocess.call(cmd, shell=True)
    print 'Returned', ret, cmd
    if ignore_error == False and ret != 0:
        exit(ret)
    return ret


def run_workload(btrfs_mount_point):
    file_path = os.path.join(btrfs_mount_point, 'test.file')
    buf = bytearray(['z'] * 4 * 2**20)  # 4 MB buffer

    f = open(file_path, 'wb')
    for i in range(50):
        f.seek(0)
        f.write(buf)
        f.flush()  # flush Python's buffer so fsync persists the whole write
        os.fsync(f.fileno())
    f.close()

    shcmd("sync")


def main():
    loop_path = '/dev/loop0'
    btrfs_mount_point = '/mnt/fsonloop'
    # use a filesystem where filefrag is supported, so later you can
    # do filefrag on the disk image
    image_folder = '/home/jun/tmp'

    loopdev = filesystem.LoopDevice(
        dev_path = loop_path,
        image_folder = image_folder,
        size_mb = 256)

    fs = filesystem.Btrfs(device = loop_path,
                          mount_point = btrfs_mount_point)

    # uncomment the blktracer lines below to trace block I/O during the run
    # blktracer = blocktrace.BlockTraceManager(
    #     dev = loop_path,
    #     resultpath = "./blkparse.output.txt",
    #     to_ftlsim_path = "./events.txt",  # operation offset size (in bytes)
    #     sector_size = 512)

    loopdev.create()
    fs.make()
    fs.mount(opt_list = ['discard', 'ssd', 'autodefrag'])
    shcmd("sync")

    # blktracer.start_tracing_and_collecting()

    run_workload(btrfs_mount_point)

    # blktracer.blkparse_file_to_ftlsim_input_file()
    # blktracer.stop_tracing_and_collecting()


if __name__ == '__main__':
    main()

utils.py

import itertools
import json
import random
import argparse
import re
import subprocess
import os
import sys
import shlex
import time
import glob
from time import localtime, strftime


def shcmd(cmd, ignore_error=False):
    print 'Doing:', cmd
    ret = subprocess.call(cmd, shell=True)
    print 'Returned', ret, cmd
    if ignore_error == False and ret != 0:
        raise RuntimeError("Failed to execute {}. Return code:{}".format(
            cmd, ret))
    return ret


def run_and_get_output(cmd):
    cmd = shlex.split(cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    p.wait()
    return p.stdout.readlines()


class cd:
    """Context manager for changing the current working directory"""
    def __init__(self, newPath):
        self.newPath = newPath

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


########################################################
# table = [
#     {'col1':data, 'col2':data, ..},
#     {'col1':data, 'col2':data, ..},
#     ...
# ]
def table_to_file(table, filepath, adddic=None):
    'save table to a file with additional columns'
    with open(filepath, 'w') as f:
        if len(table) == 0:
            return
        colnames = table[0].keys()
        if adddic != None:
            colnames += adddic.keys()
        colnamestr = ';'.join(colnames) + '\n'
        f.write(colnamestr)

        for row in table:
            if adddic != None:
                rowcopy = dict(row.items() + adddic.items())
            else:
                rowcopy = row
            rowstr = [rowcopy[k] for k in colnames]
            rowstr = [str(x) for x in rowstr]
            rowstr = ';'.join(rowstr) + '\n'
            f.write(rowstr)


def adjust_width(s, width = 32):
    return s.rjust(width)


def table_to_str(table, adddic = None, sep = ';'):
    if len(table) == 0:
        return None

    tablestr = ''
    colnames = table[0].keys()
    if adddic != None:
        colnames += adddic.keys()
    colnamestr = sep.join([adjust_width(s) for s in colnames]) + '\n'
    tablestr += colnamestr

    for row in table:
        if adddic != None:
            rowcopy = dict(row.items() + adddic.items())
        else:
            rowcopy = row
        rowstr = [rowcopy[k] for k in colnames]
        rowstr = [adjust_width(str(x)) for x in rowstr]
        rowstr = sep.join(rowstr) + '\n'
        tablestr += rowstr

    return tablestr


def load_json(fpath):
    decoded = json.load(open(fpath, 'r'))
    return decoded


def prepare_dir_for_path(path):
    "create parent dirs for path if necessary"
    dirpath = os.path.dirname(path)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)


def ParameterCombinations(parameter_dict):
    """
    Get all the combinations of the values from each key
    http://tinyurl.com/nnglcs9
    Input: parameter_dict = {
            p0: [x, y, z, ..],
            p1: [a, b, c, ..],
            ...}
    Output: [
            {p0: x, p1: a, ..},
            {..},
            ...
            ]
    """
    d = parameter_dict
    return [dict(zip(d, v)) for v in itertools.product(*d.values())]


def debug_decor(function):
    def wrapper(*args, **kwargs):
        ret = function(*args, **kwargs)
        print function.__name__, args, kwargs, 'ret:', ret
        return ret
    return wrapper


def breakpoint():
    import pdb; pdb.set_trace()
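
A minimal, self-contained example of table_to_file() and ParameterCombinations() (the file path and the parameter names are arbitrary):

from utils import table_to_file, ParameterCombinations

rows = [{'op': 'write', 'offset': 0}, {'op': 'discard', 'offset': 4096}]
table_to_file(rows, '/tmp/example_table.txt', adddic={'run': 1})
# writes a ';'-separated header (op;offset;run, order follows dict ordering)
# and one line per row

print ParameterCombinations({'fs': ['btrfs'], 'mount_opt': ['discard', 'nodiscard']})
# [{'fs': 'btrfs', 'mount_opt': 'discard'}, {'fs': 'btrfs', 'mount_opt': 'nodiscard'}]
# (ordering may vary)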