Implement storedevs, prepare to port littlefs
This commit is contained in:
1433
kernel/fs/littlefs/scripts/bench.py
Executable file
1433
kernel/fs/littlefs/scripts/bench.py
Executable file
File diff suppressed because it is too large
Load Diff
181
kernel/fs/littlefs/scripts/changeprefix.py
Executable file
181
kernel/fs/littlefs/scripts/changeprefix.py
Executable file
@ -0,0 +1,181 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Change prefixes in files/filenames. Useful for creating different versions
|
||||
# of a codebase that don't conflict at compile time.
|
||||
#
|
||||
# Example:
|
||||
# $ ./scripts/changeprefix.py lfs lfs3
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2019, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import glob
|
||||
import itertools
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
GIT_PATH = ['git']
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open path for reading/writing, treating '-' as stdin/stdout.

    The stdin/stdout fd is dup'd so the returned file object can be
    closed without closing the real stdin/stdout.
    """
    # sys is only imported under __main__ in this script; import it here
    # too so openio also works if this module is imported elsewhere
    import sys
    # allow '-' for stdin/stdout
    if path == '-':
        if mode == 'r':
            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
    else:
        return open(path, mode, buffering)
|
||||
|
||||
def changeprefix(from_prefix, to_prefix, line):
    """Replace prefixes in a single line of text.

    Three forms are rewritten: the lowercase prefix at a word boundary,
    the uppercase prefix at a word boundary, and the uppercase prefix in
    -D compiler defines (where -D prevents a word boundary).

    Returns a (new_line, count) tuple, count being the total number of
    replacements made.
    """
    total = 0
    for pattern, replacement in [
            # word-boundary lowercase prefix
            ('\\b' + from_prefix, to_prefix),
            # word-boundary uppercase prefix
            ('\\b' + from_prefix.upper(), to_prefix.upper()),
            # -DPREFIX... compiler defines
            ('\\B-D' + from_prefix.upper(), '-D' + to_prefix.upper())]:
        line, n = re.subn(pattern, replacement, line)
        total += n
    return line, total
|
||||
|
||||
def changefile(from_prefix, to_prefix, from_path, to_path, *,
        no_replacements=False):
    """Copy from_path to to_path, changing prefixes line by line.

    When from_path == to_path the file is rewritten in place via a
    temporary file. When from_path != to_path the original file is
    removed (a rename). '-' means stdin/stdout and is never removed.
    Prints a replacement-count summary when done.
    """
    # rename any prefixes in file
    count = 0

    # create a temporary file to avoid overwriting ourself
    if from_path == to_path and to_path != '-':
        to_path_temp = tempfile.NamedTemporaryFile('w', delete=False)
        to_path = to_path_temp.name
    else:
        to_path_temp = None

    with openio(from_path) as from_f:
        with openio(to_path, 'w') as to_f:
            for line in from_f:
                if not no_replacements:
                    line, n = changeprefix(from_prefix, to_prefix, line)
                    count += n
                to_f.write(line)

    # preserve permissions/timestamps on real files
    if from_path != '-' and to_path != '-':
        shutil.copystat(from_path, to_path)

    if to_path_temp:
        # in-place rewrite: move the temp file over the original
        shutil.move(to_path, from_path)
    elif from_path != '-':
        # renamed to a new path: drop the old file
        os.remove(from_path)

    # Summary
    print('%s: %d replacements' % (
        '%s -> %s' % (from_path, to_path) if not to_path_temp else from_path,
        count))
|
||||
|
||||
def main(from_prefix, to_prefix, paths=[], *,
        verbose=False,
        output=None,
        no_replacements=False,
        no_renames=False,
        git=False,
        no_stage=False,
        git_path=GIT_PATH):
    """Change prefixes in the given files' contents and filenames.

    With --git and no explicit paths, operates on every file tracked by
    git at HEAD, and (unless no_stage) stages renames/changes via
    git rm/git add.
    """
    # NOTE(review): mutable default paths=[] is never mutated here, but a
    # None default would be the safer idiom
    if not paths:
        if git:
            # no explicit paths, fall back to every file git tracks
            cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
            if verbose:
                print(' '.join(shlex.quote(c) for c in cmd))
            paths = subprocess.check_output(cmd, encoding='utf8').split()
        else:
            print('no paths?', file=sys.stderr)
            sys.exit(1)

    for from_path in paths:
        # rename filename?
        if output:
            to_path = output
        elif no_renames:
            to_path = from_path
        else:
            # apply the same prefix change to the basename
            to_path = os.path.join(
                os.path.dirname(from_path),
                changeprefix(from_prefix, to_prefix,
                    os.path.basename(from_path))[0])

        # rename contents
        changefile(from_prefix, to_prefix, from_path, to_path,
            no_replacements=no_replacements)

        # stage?
        if git and not no_stage:
            if from_path != to_path:
                # file was renamed, remove the old path from the index
                cmd = git_path + ['rm', '-q', from_path]
                if verbose:
                    print(' '.join(shlex.quote(c) for c in cmd))
                subprocess.check_call(cmd)
            cmd = git_path + ['add', to_path]
            if verbose:
                print(' '.join(shlex.quote(c) for c in cmd))
            subprocess.check_call(cmd)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: parse args and forward only the options
    # the user actually set to main()
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Change prefixes in files/filenames. Useful for creating "
            "different versions of a codebase that don't conflict at compile "
            "time.",
        allow_abbrev=False)
    parser.add_argument(
        'from_prefix',
        help="Prefix to replace.")
    parser.add_argument(
        'to_prefix',
        help="Prefix to replace with.")
    parser.add_argument(
        'paths',
        nargs='*',
        help="Files to operate on.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-o', '--output',
        help="Output file.")
    parser.add_argument(
        '-N', '--no-replacements',
        action='store_true',
        help="Don't change prefixes in files")
    parser.add_argument(
        '-R', '--no-renames',
        action='store_true',
        help="Don't rename files")
    parser.add_argument(
        '--git',
        action='store_true',
        help="Use git to find/update files.")
    parser.add_argument(
        '--no-stage',
        action='store_true',
        help="Don't stage changes with git.")
    parser.add_argument(
        '--git-path',
        # allow extra flags baked into the git path
        type=lambda x: x.split(),
        default=GIT_PATH,
        help="Path to git executable, may include flags. "
            "Defaults to %r." % GIT_PATH)
    # drop unset (None) options so main()'s defaults apply
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
707
kernel/fs/littlefs/scripts/code.py
Executable file
707
kernel/fs/littlefs/scripts/code.py
Executable file
@ -0,0 +1,707 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find code size at the function level. Basically just a big wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/code.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
NM_PATH = ['nm']
|
||||
NM_TYPES = 'tTrRdD'
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
    """An integer field that can also represent +/- infinity.

    Parses strings via int(x, 0) (so 0x/0o/0b prefixes work), or the
    literals '∞'/'inf' (optionally signed). Provides fixed-width table
    formatting plus diff/ratio helpers used when comparing builds.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # note: raw strings so \s/\+ are regex escapes, not Python
                # escape sequences (SyntaxWarning on Python >= 3.12)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        # infinity has no integer value
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    # placeholder cell when a value is missing
    none = '%7s' % '-'
    def table(self):
        """Format as a fixed-width table cell."""
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        """Format the signed difference self - other as a table cell."""
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        """Relative change (new-old)/old, with infinities handled."""
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# code size results
|
||||
class CodeResult(co.namedtuple('CodeResult', [
        'file', 'function',
        'size'])):
    """One code-size measurement: the size of one function in one file."""
    # fields to group by / measure / sort by, and their field types
    _by = ['file', 'function']
    _fields = ['size']
    _sort = ['size']
    _types = {'size': Int}

    __slots__ = ()
    def __new__(cls, file='', function='', size=0):
        # coerce size so both CSV strings and plain ints work
        return super().__new__(cls, file, function, Int(size))

    def __add__(self, other):
        # merging duplicate symbols sums their sizes
        merged = self.size + other.size
        return CodeResult(self.file, self.function, merged)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open path for reading/writing, treating '-' as stdin/stdout.

    The stdin/stdout fd is dup'd so the returned file object can be
    closed without closing the real stdin/stdout.
    """
    # sys is only imported under __main__ in this script; import it here
    # too so openio also works if this module is imported elsewhere
    import sys
    # allow '-' for stdin/stdout
    if path == '-':
        if mode == 'r':
            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
    else:
        return open(path, mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
        nm_path=NM_PATH,
        nm_types=NM_TYPES,
        objdump_path=OBJDUMP_PATH,
        sources=None,
        everything=False,
        **args):
    """Collect per-function code sizes from the given object files.

    Runs nm --size-sort to find symbol sizes, then objdump --dwarf to map
    functions back to their source files when debug-info is available.
    Returns a list of CodeResult. objdump failures are tolerated (source
    paths may just be inaccurate); nm failures are fatal.
    """
    # note: raw strings so \s/\. are regex escapes, not Python escape
    # sequences (SyntaxWarning on Python >= 3.12)
    size_pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(nm_types) +
        ' (?P<func>.+?)$')
    line_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)'
        r'(?:\s+(?P<dir>[0-9]+))?'
        r'\s+.*'
        r'\s+(?P<path>[^\s]+)$')
    info_pattern = re.compile(
        r'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
        r'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')

    results = []
    for path in obj_paths:
        # guess the source, if we have debug-info we'll replace this later
        # (count= keyword: positional count is deprecated since Python 3.13)
        file = re.sub(r'(\.o)?$', '.c', path, count=1)

        # find symbol sizes
        results_ = []
        # note nm-path may contain extra args
        cmd = nm_path + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            m = size_pattern.match(line)
            if m:
                func = m.group('func')
                # discard internal functions
                if not everything and func.startswith('__'):
                    continue
                results_.append(CodeResult(
                    file, func,
                    int(m.group('size'), 16)))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # nm is required, give up
            sys.exit(-1)

        # try to figure out the source file if we have debug-info
        dirs = {}
        files = {}
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # note that files contain references to dirs, which we
            # dereference as soon as we see them as each file table follows a
            # dir table
            m = line_pattern.match(line)
            if m:
                if not m.group('dir'):
                    # found a directory entry
                    dirs[int(m.group('no'))] = m.group('path')
                else:
                    # found a file entry
                    dir = int(m.group('dir'))
                    if dir in dirs:
                        files[int(m.group('no'))] = os.path.join(
                            dirs[dir],
                            m.group('path'))
                    else:
                        files[int(m.group('no'))] = m.group('path')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        defs = {}
        is_func = False
        f_name = None
        f_file = None
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # state machine here to find definitions
            m = info_pattern.match(line)
            if m:
                if m.group('tag'):
                    # new DIE: flush the previous subprogram, if any
                    if is_func:
                        defs[f_name] = files.get(f_file, '?')
                    is_func = (m.group('tag') == 'DW_TAG_subprogram')
                elif m.group('name'):
                    f_name = m.group('name')
                elif m.group('file'):
                    f_file = int(m.group('file'))
        # flush the last subprogram
        if is_func:
            defs[f_name] = files.get(f_file, '?')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        for r in results_:
            # find best matching debug symbol, this may be slightly different
            # due to optimizations
            if defs:
                # exact match? avoid difflib if we can for speed
                if r.function in defs:
                    file = defs[r.function]
                else:
                    _, file = max(
                        defs.items(),
                        key=lambda d: difflib.SequenceMatcher(None,
                            d[0],
                            r.function, False).ratio())
            else:
                file = r.file

            # ignore filtered sources
            if sources is not None:
                if not any(
                        os.path.abspath(file) == os.path.abspath(s)
                        for s in sources):
                    continue
            else:
                # default to only cwd
                if not everything and not os.path.commonpath([
                        os.getcwd(),
                        os.path.abspath(file)]) == os.getcwd():
                    continue

            # simplify path
            if os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(file)]) == os.getcwd():
                file = os.path.relpath(file)
            else:
                file = os.path.abspath(file)

            results.append(r._replace(file=file))

    return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Group results by the given fields, merging collisions with +.

    by defaults to Result._by. defines is an optional list of
    (field, allowed-values) pairs used to filter results first.
    """
    if by is None:
        by = Result._by

    # sanity-check requested field names up front
    for k in it.chain(by or [], (k for k, _ in defines or [])):
        if k not in Result._by and k not in Result._fields:
            print("error: could not find field %r?" % k)
            sys.exit(-1)

    # filter by matching defines
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # bucket results that share the same by-fields, preserving order
    buckets = co.OrderedDict()
    for r in results:
        key = tuple(getattr(r, k) for k in by)
        buckets.setdefault(key, []).append(r)

    # merge each bucket with the Result type's __add__
    return [sum(rs[1:], start=rs[0]) for rs in buckets.values()]
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    """Render results (optionally diffed against diff_results) as a table.

    by selects the name columns, fields the value columns. With
    diff_results, old/new/diff columns and percentage ratios are shown;
    percent=True shows only percentages; all=True keeps unchanged rows;
    summary=True prints only the TOTAL row. Prints to stdout.
    """
    # the 'all' parameter shadows builtins.all, recover it
    # NOTE(review): __builtins__ is the builtins module only when this file
    # runs as __main__; it is a dict if imported as a module — confirm
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        # biggest changes first
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        # apply user sorts last (stable sort keeps earlier ordering as
        # tie-breaker)
        for k, reverse in reversed(sort):
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))


    # build up our lines
    lines = []

    # header
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
        if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/diff column triplets
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    def table_entry(name, r, diff_r=None, ratios=[]):
        # format one row; trailing element is the ratio annotation
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # skip unchanged rows unless -a/--all
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    """Find code sizes, optionally diff against a previous CSV, and report.

    Sizes come from collect() unless -u/--use points at a CSV of prior
    results. Writes a CSV with -o/--output, diffs against -d/--diff, and
    prints a table unless -q/--quiet.
    """
    # find sizes
    if not args.get('use', None):
        results = collect(obj_paths, **args)
    else:
        # load previous results from CSV instead of parsing objects
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                # skip rows with no code_* measurements
                if not any('code_'+k in r and r['code_'+k].strip()
                        for k in CodeResult._fields):
                    continue
                try:
                    results.append(CodeResult(
                        **{k: r[k] for k in CodeResult._by
                            if k in r and r[k].strip()},
                        **{k: r['code_'+k] for k in CodeResult._fields
                            if 'code_'+k in r and r['code_'+k].strip()}))
                except TypeError:
                    # tolerate malformed rows
                    pass

    # fold
    results = fold(CodeResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else CodeResult._sort)),
                reverse=reverse ^ (not k or k in CodeResult._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else CodeResult._by)
                + ['code_'+k for k in (
                    fields if fields is not None else CodeResult._fields)])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k) for k in (
                        by if by is not None else CodeResult._by)}
                    | {'code_'+k: getattr(r, k) for k in (
                        fields if fields is not None else CodeResult._fields)})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    if not any('code_'+k in r and r['code_'+k].strip()
                            for k in CodeResult._fields):
                        continue
                    try:
                        diff_results.append(CodeResult(
                            **{k: r[k] for k in CodeResult._by
                                if k in r and r[k].strip()},
                            **{k: r['code_'+k] for k in CodeResult._fields
                                if 'code_'+k in r and r['code_'+k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            # a missing diff file just means everything is new
            pass

        # fold
        diff_results = fold(CodeResult, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(CodeResult, results,
            diff_results if args.get('diff') else None,
            by=by if by is not None else ['function'],
            fields=fields,
            sort=sort,
            **args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: parse args and forward only the options
    # the user actually set to main()
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find code size at the function level.",
        allow_abbrev=False)
    parser.add_argument(
        'obj_paths',
        nargs='*',
        help="Input *.o files.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=CodeResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=CodeResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        # parse k=v1,v2 into (k, {v1, v2})
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    # accumulate (field, reverse) pairs so -s/-S can be mixed and repeated
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--nm-types',
        default=NM_TYPES,
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %r." % NM_TYPES)
    parser.add_argument(
        '--nm-path',
        # allow extra flags baked into the tool path
        type=lambda x: x.split(),
        default=NM_PATH,
        help="Path to the nm executable, may include flags. "
            "Defaults to %r." % NM_PATH)
    parser.add_argument(
        '--objdump-path',
        type=lambda x: x.split(),
        default=OBJDUMP_PATH,
        help="Path to the objdump executable, may include flags. "
            "Defaults to %r." % OBJDUMP_PATH)
    # drop unset (None) options so main()'s defaults apply
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
828
kernel/fs/littlefs/scripts/cov.py
Executable file
828
kernel/fs/littlefs/scripts/cov.py
Executable file
@ -0,0 +1,828 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find coverage info after running tests.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/cov.py \
|
||||
# lfs.t.a.gcda lfs_util.t.a.gcda \
|
||||
# -Flfs.c -Flfs_util.c -slines
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import itertools as it
|
||||
import json
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
# TODO use explode_asserts to avoid counting assert branches?
|
||||
# TODO use dwarf=info to find functions for inline functions?
|
||||
|
||||
GCOV_PATH = ['gcov']
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
    """An integer field that can also represent +/- infinity.

    Parses strings via int(x, 0) (so 0x/0o/0b prefixes work), or the
    literals '∞'/'inf' (optionally signed). Provides fixed-width table
    formatting plus diff/ratio helpers used when comparing runs.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # note: raw strings so \s/\+ are regex escapes, not Python
                # escape sequences (SyntaxWarning on Python >= 3.12)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        # infinity has no integer value
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    # placeholder cell when a value is missing
    none = '%7s' % '-'
    def table(self):
        """Format as a fixed-width table cell."""
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        """Format the signed difference self - other as a table cell."""
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        """Relative change (new-old)/old, with infinities handled."""
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# fractional fields, a/b
|
||||
# fractional fields, a/b
class Frac(co.namedtuple('Frac', 'a,b')):
    """A fractional field a/b, e.g. lines-hit / lines-total coverage.

    Accepts another Frac, an 'a/b' string, or a single value (in which
    case b defaults to a, i.e. 100%). Comparison orders by the ratio a/b
    first, then by a.
    """
    __slots__ = ()
    def __new__(cls, a=0, b=None):
        if isinstance(a, Frac) and b is None:
            return a
        if isinstance(a, str) and b is None:
            a, b = a.split('/', 1)
        if b is None:
            b = a
        return super().__new__(cls, Int(a), Int(b))

    def __str__(self):
        return '%s/%s' % (self.a, self.b)

    def __float__(self):
        return float(self.a)

    # placeholder cell when a value is missing
    none = '%11s %7s' % ('-', '-')
    def table(self):
        """Format as 'a/b percent' table cell; 0/0 counts as 100%."""
        t = self.a.x/self.b.x if self.b.x else 1.0
        return '%11s %7s' % (
            self,
            '∞%' if t == +m.inf
            else '-∞%' if t == -m.inf
            else '%.1f%%' % (100*t))

    diff_none = '%11s' % '-'
    def diff_table(self):
        return '%11s' % (self,)

    def diff_diff(self, other):
        """Format the elementwise difference as a 'da/db' cell."""
        new_a, new_b = self if self else (Int(0), Int(0))
        old_a, old_b = other if other else (Int(0), Int(0))
        return '%11s' % ('%s/%s' % (
            new_a.diff_diff(old_a).strip(),
            new_b.diff_diff(old_b).strip()))

    def ratio(self, other):
        """Change in the a/b ratio (new percentage - old percentage)."""
        new_a, new_b = self if self else (Int(0), Int(0))
        old_a, old_b = other if other else (Int(0), Int(0))
        new = new_a.x/new_b.x if new_b.x else 1.0
        old = old_a.x/old_b.x if old_b.x else 1.0
        return new - old

    def __add__(self, other):
        return self.__class__(self.a + other.a, self.b + other.b)

    def __sub__(self, other):
        return self.__class__(self.a - other.a, self.b - other.b)

    def __mul__(self, other):
        # fix: multiply both numerator and denominator, (a1*a2)/(b1*b2) —
        # the previous b1+b2 was inconsistent with __add__/__sub__'s
        # elementwise convention and with fraction multiplication
        return self.__class__(self.a * other.a, self.b * other.b)

    def __lt__(self, other):
        self_t = self.a.x/self.b.x if self.b.x else 1.0
        other_t = other.a.x/other.b.x if other.b.x else 1.0
        return (self_t, self.a.x) < (other_t, other.a.x)

    def __gt__(self, other):
        return self.__class__.__lt__(other, self)

    def __le__(self, other):
        return not self.__gt__(other)

    def __ge__(self, other):
        return not self.__lt__(other)
|
||||
|
||||
# coverage results
class CovResult(co.namedtuple('CovResult', [
        'file', 'function', 'line',
        'calls', 'hits', 'funcs', 'lines', 'branches'])):
    """One coverage measurement, keyed by file/function/line.

    The class attributes below drive the generic fold/table machinery:
    _by are the key fields, _fields the value fields, _sort the default
    sort priority, and _types maps each value field to its field type.
    """
    # note: _fields deliberately shadows namedtuple's _fields -- the
    # fold/table helpers read it as "the value columns"
    _by = ['file', 'function', 'line']
    _fields = ['calls', 'hits', 'funcs', 'lines', 'branches']
    _sort = ['funcs', 'lines', 'branches', 'hits', 'calls']
    _types = {
        'calls': Int, 'hits': Int,
        'funcs': Frac, 'lines': Frac, 'branches': Frac}

    __slots__ = ()
    def __new__(cls, file='', function='', line=0,
            calls=0, hits=0, funcs=0, lines=0, branches=0):
        # coerce everything into field types (line stays a plain int)
        return super().__new__(cls, file, function, int(Int(line)),
            Int(calls), Int(hits), Frac(funcs), Frac(lines), Frac(branches))

    def __add__(self, other):
        # merging two results for the same key: counts take the max
        # (same code ran in multiple runs), fractions accumulate
        return CovResult(self.file, self.function, self.line,
            max(self.calls, other.calls),
            max(self.hits, other.hits),
            self.funcs + other.funcs,
            self.lines + other.lines,
            self.branches + other.branches)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open a file, treating '-' as stdin (reads) or stdout (writes).

    The std stream's fd is dup'd so closing the returned file object
    doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)

    stdio = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stdio.fileno()), mode, buffering)
|
||||
|
||||
def collect(gcda_paths, *,
        gcov_path=GCOV_PATH,
        sources=None,
        everything=False,
        **args):
    """Collect coverage results from *.gcda files via gcov's json output.

    gcov_path may contain extra args (it is a command list). sources, when
    given, restricts results to those exact source files; otherwise only
    files under the cwd are kept unless everything is set. Exits on gcov
    failure. Returns a list of CovResult.
    """
    results = []
    for path in gcda_paths:
        # get coverage info through gcov's json output
        # note, gcov-path may contain extra args
        # fix: use the gcov_path parameter, previously the module-level
        # GCOV_PATH constant was used and --gcov-path was silently ignored
        cmd = gcov_path + ['-b', '-t', '--json-format', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        data = json.load(proc.stdout)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect line/branch coverage
        for file in data['files']:
            # ignore filtered sources
            if sources is not None:
                if not any(
                        os.path.abspath(file['file']) == os.path.abspath(s)
                        for s in sources):
                    continue
            else:
                # default to only cwd
                if not everything and not os.path.commonpath([
                        os.getcwd(),
                        os.path.abspath(file['file'])]) == os.getcwd():
                    continue

            # simplify path
            if os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(file['file'])]) == os.getcwd():
                file_name = os.path.relpath(file['file'])
            else:
                file_name = os.path.abspath(file['file'])

            for func in file['functions']:
                func_name = func.get('name', '(inlined)')
                # discard internal functions (this includes injected test cases)
                if not everything:
                    if func_name.startswith('__'):
                        continue

                # go ahead and add functions, later folding will merge this if
                # there are other hits on this line
                results.append(CovResult(
                    file_name, func_name, func['start_line'],
                    func['execution_count'], 0,
                    Frac(1 if func['execution_count'] > 0 else 0, 1),
                    0,
                    0))

            for line in file['lines']:
                func_name = line.get('function_name', '(inlined)')
                # discard internal function (this includes injected test cases)
                if not everything:
                    if func_name.startswith('__'):
                        continue

                # go ahead and add lines, later folding will merge this if
                # there are other hits on this line
                results.append(CovResult(
                    file_name, func_name, line['line_number'],
                    0, line['count'],
                    0,
                    Frac(1 if line['count'] > 0 else 0, 1),
                    Frac(
                        sum(1 if branch['count'] > 0 else 0
                            for branch in line['branches']),
                        len(line['branches']))))

    return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Merge results that share the same key.

    by selects the key fields (defaults to Result._by); defines is an
    optional list of (field, allowed-values) filters applied first.
    Merging uses the Result type's __add__. Exits on an unknown field.
    """
    if by is None:
        by = Result._by

    # validate requested fields before doing any work
    for k in it.chain(by or [], (k for k, _ in defines or [])):
        if k not in Result._by and k not in Result._fields:
            print("error: could not find field %r?" % k)
            sys.exit(-1)

    # drop results that don't match every define
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # group results by key, preserving first-seen order
    groups = co.OrderedDict()
    for r in results:
        key = tuple(getattr(r, k) for k in by)
        groups.setdefault(key, []).append(r)

    # merge each group with __add__
    folded = []
    for rs in groups.values():
        merged = rs[0]
        for r in rs[1:]:
            merged = merged + r
        folded.append(merged)

    return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    """Render results (optionally diffed against diff_results) as a table.

    by selects the row-key fields, fields the value columns, sort an
    optional list of (field, reverse) pairs. summary prints only the
    TOTAL row; all keeps unchanged rows in a diff; percent shows only
    percentage change instead of old/new/diff columns.
    """
    # the 'all' parameter shadows the builtin; stash the flag and restore
    # the builtin for use below
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name (comma-joined key fields)
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        # biggest ratio change first
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        # apply user sorts last (stable), least-significant first
        for k, reverse in reversed(sort):
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))


    # build up our lines
    lines = []

    # header
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
            if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/diff column triplets
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    def table_entry(name, r, diff_r=None, ratios=[]):
        # build one table row; note the mutable default for ratios is
        # only read, never mutated, so it is harmless here
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # skip unchanged rows unless -a/--all
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s  %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
|
||||
|
||||
|
||||
def annotate(Result, results, *,
        annotate=False,
        lines=False,
        branches=False,
        **args):
    """Print source files annotated with per-line coverage info.

    With annotate, full files are printed; otherwise only spans around
    uncovered lines/branches (plus args['context'] lines of context) are
    shown, diff-hunk style. args supplies 'context', 'width', 'color'.
    """
    # if neither branches/lines specified, color both
    if annotate and not lines and not branches:
        lines, branches = True, True

    # iterate over files in first-seen order, deduplicated
    for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
        # flatten to line info
        results = fold(Result, results, by=['file', 'line'])
        table = {r.line: r for r in results if r.file == path}

        # calculate spans to show
        if not annotate:
            spans = []
            last = None
            func = None
            for line, r in sorted(table.items()):
                # a line is "interesting" if unhit or partially branched
                if ((lines and int(r.hits) == 0)
                        or (branches and r.branches.a < r.branches.b)):
                    # close enough to the previous span? extend it
                    if last is not None and line - last.stop <= args['context']:
                        last = range(
                            last.start,
                            line+1+args['context'])
                    else:
                        if last is not None:
                            spans.append((last, func))
                        last = range(
                            line-args['context'],
                            line+1+args['context'])
                        func = r.function
            if last is not None:
                spans.append((last, func))

        with open(path) as f:
            skipped = False
            for i, line in enumerate(f):
                # skip lines not in spans?
                if not annotate and not any(i+1 in s for s, _ in spans):
                    skipped = True
                    continue

                if skipped:
                    skipped = False
                    # NOTE(review): this always shows the FIRST span's
                    # function name in every hunk header -- looks like it
                    # should find the span containing i+1; confirm upstream
                    print('%s@@ %s:%d: %s @@%s' % (
                        '\x1b[36m' if args['color'] else '',
                        path,
                        i+1,
                        next(iter(f for _, f in spans)),
                        '\x1b[m' if args['color'] else ''))

                # build line
                if line.endswith('\n'):
                    line = line[:-1]

                if i+1 in table:
                    r = table[i+1]
                    # append hit/branch info as a trailing comment, padded
                    # to the assumed source width
                    line = '%-*s // %s hits%s' % (
                        args['width'],
                        line,
                        r.hits,
                        ', %s branches' % (r.branches,)
                        if int(r.branches.b) else '')

                    if args['color']:
                        # red for unhit lines, magenta for partial branches
                        if lines and int(r.hits) == 0:
                            line = '\x1b[1;31m%s\x1b[m' % line
                        elif branches and r.branches.a < r.branches.b:
                            line = '\x1b[35m%s\x1b[m' % line

                print(line)
|
||||
|
||||
|
||||
def main(gcda_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        hits=False,
        **args):
    """Entry point: collect (or load) coverage results, fold, sort, and
    output as CSV, diff table, or annotated source.

    Remaining CLI flags arrive via args (use/output/diff/quiet/annotate/
    lines/branches/error_on_*/color/...). May sys.exit with 2 or 3 when
    coverage thresholds fail.
    """
    # figure out what color should be
    if args.get('color') == 'auto':
        args['color'] = sys.stdout.isatty()
    elif args.get('color') == 'always':
        args['color'] = True
    else:
        args['color'] = False

    # find sizes
    if not args.get('use', None):
        results = collect(gcda_paths, **args)
    else:
        # load previously-collected results from CSV instead of running gcov
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                # skip rows with no cov_* data at all
                if not any('cov_'+k in r and r['cov_'+k].strip()
                        for k in CovResult._fields):
                    continue
                try:
                    results.append(CovResult(
                        **{k: r[k] for k in CovResult._by
                            if k in r and r[k].strip()},
                        **{k: r['cov_'+k]
                            for k in CovResult._fields
                            if 'cov_'+k in r
                                and r['cov_'+k].strip()}))
                except TypeError:
                    # malformed row, ignore
                    pass

    # fold
    results = fold(CovResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else CovResult._sort)),
                reverse=reverse ^ (not k or k in CovResult._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else CovResult._by)
                + ['cov_'+k for k in (
                    fields if fields is not None else CovResult._fields)])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k) for k in (
                        by if by is not None else CovResult._by)}
                    | {'cov_'+k: getattr(r, k) for k in (
                        fields if fields is not None else CovResult._fields)})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    if not any('cov_'+k in r and r['cov_'+k].strip()
                            for k in CovResult._fields):
                        continue
                    try:
                        diff_results.append(CovResult(
                            **{k: r[k] for k in CovResult._by
                                if k in r and r[k].strip()},
                            **{k: r['cov_'+k]
                                for k in CovResult._fields
                                if 'cov_'+k in r
                                    and r['cov_'+k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            # no previous results is fine, diff against nothing
            pass

        # fold
        diff_results = fold(CovResult, diff_results,
            by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        if (args.get('annotate')
                or args.get('lines')
                or args.get('branches')):
            # annotate sources
            annotate(CovResult, results, **args)
        else:
            # print table
            table(CovResult, results,
                diff_results if args.get('diff') else None,
                by=by if by is not None else ['function'],
                fields=fields if fields is not None
                    else ['lines', 'branches'] if not hits
                    else ['calls', 'hits'],
                sort=sort,
                **args)

    # catch lack of coverage
    if args.get('error_on_lines') and any(
            r.lines.a < r.lines.b for r in results):
        sys.exit(2)
    elif args.get('error_on_branches') and any(
            r.branches.a < r.branches.b for r in results):
        sys.exit(3)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI wrapper: build the argument parser and hand everything that was
    # actually specified to main() as keyword arguments
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find coverage info after running tests.",
        allow_abbrev=False)
    parser.add_argument(
        'gcda_paths',
        nargs='*',
        help="Input *.gcda files.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=CovResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=CovResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        # parse 'field=v1,v2' into (field, {v1, v2})
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    # custom action so -s/-S accumulate (field, reverse) pairs in order
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--hits',
        action='store_true',
        help="Show total hits instead of coverage.")
    parser.add_argument(
        '-A', '--annotate',
        action='store_true',
        help="Show source files annotated with coverage info.")
    parser.add_argument(
        '-L', '--lines',
        action='store_true',
        help="Show uncovered lines.")
    parser.add_argument(
        '-B', '--branches',
        action='store_true',
        help="Show uncovered branches.")
    parser.add_argument(
        '-c', '--context',
        type=lambda x: int(x, 0),
        default=3,
        help="Show n additional lines of context. Defaults to 3.")
    parser.add_argument(
        '-W', '--width',
        type=lambda x: int(x, 0),
        default=80,
        help="Assume source is styled with this many columns. Defaults to 80.")
    parser.add_argument(
        '--color',
        choices=['never', 'always', 'auto'],
        default='auto',
        help="When to use terminal colors. Defaults to 'auto'.")
    parser.add_argument(
        '-e', '--error-on-lines',
        action='store_true',
        help="Error if any lines are not covered.")
    parser.add_argument(
        '-E', '--error-on-branches',
        action='store_true',
        help="Error if any branches are not covered.")
    parser.add_argument(
        '--gcov-path',
        default=GCOV_PATH,
        type=lambda x: x.split(),
        help="Path to the gcov executable, may include paths. "
            "Defaults to %r." % GCOV_PATH)
    # drop unspecified (None) options so main's defaults apply
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
704
kernel/fs/littlefs/scripts/data.py
Executable file
704
kernel/fs/littlefs/scripts/data.py
Executable file
@ -0,0 +1,704 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find data size at the function level. Basically just a big wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/data.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
NM_PATH = ['nm']
|
||||
NM_TYPES = 'dDbB'
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
# integer fields
class Int(co.namedtuple('Int', 'x')):
    """An integer field, an int that also accepts +-inf/∞ and knows how
    to render itself in fixed-width table/diff columns.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # fix: raw strings for the regexes, '\s' in a non-raw
                # string is an invalid escape sequence (warns in newer
                # CPython)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    # fixed-width render of an absent field
    none = '%7s' % '-'
    def table(self):
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        """Render the signed difference between two results."""
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        """Relative change (new-old)/old, with inf/zero edge cases."""
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# data size results
class DataResult(co.namedtuple('DataResult', [
        'file', 'function',
        'size'])):
    """One data-size measurement, keyed by file/function.

    The class attributes drive the generic fold/table machinery: _by are
    the key fields, _fields the value fields, _sort the default sort
    priority, and _types maps each value field to its field type.
    """
    # note: _fields deliberately shadows namedtuple's _fields
    _by = ['file', 'function']
    _fields = ['size']
    _sort = ['size']
    _types = {'size': Int}

    __slots__ = ()
    def __new__(cls, file='', function='', size=0):
        # coerce size into an Int field
        return super().__new__(cls, file, function,
            Int(size))

    def __add__(self, other):
        # merging two results for the same key sums their sizes
        return DataResult(self.file, self.function,
            self.size + other.size)
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open a file, treating '-' as stdin (reads) or stdout (writes).

    The std stream's fd is dup'd so closing the returned file object
    doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)

    stdio = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stdio.fileno()), mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
        nm_path=NM_PATH,
        nm_types=NM_TYPES,
        objdump_path=OBJDUMP_PATH,
        sources=None,
        everything=False,
        **args):
    """Collect data sizes from object files via nm, mapping symbols back
    to source files via objdump's DWARF output when available.

    nm_path/objdump_path are command lists and may contain extra args.
    nm_types selects which nm symbol types count as data. Returns a list
    of DataResult. Exits if nm fails; objdump failures are tolerated
    (source attribution just degrades).
    """
    # NOTE(review): these patterns use non-raw strings with \s escapes --
    # invalid escape sequences that warn in newer CPython; consider r''
    size_pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(nm_types) +
        ' (?P<func>.+?)$')
    line_pattern = re.compile(
        '^\s+(?P<no>[0-9]+)'
        '(?:\s+(?P<dir>[0-9]+))?'
        '\s+.*'
        '\s+(?P<path>[^\s]+)$')
    info_pattern = re.compile(
        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
        '|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        '|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')

    results = []
    for path in obj_paths:
        # guess the source, if we have debug-info we'll replace this later
        # NOTE(review): positional count arg to re.sub is deprecated in
        # newer Python; count=1 would be future-proof
        file = re.sub('(\.o)?$', '.c', path, 1)

        # find symbol sizes
        results_ = []
        # note nm-path may contain extra args
        cmd = nm_path + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # note: local m shadows the module-level 'math as m' import
            # inside this function -- confusing but harmless here
            m = size_pattern.match(line)
            if m:
                func = m.group('func')
                # discard internal functions
                if not everything and func.startswith('__'):
                    continue
                results_.append(DataResult(
                    file, func,
                    int(m.group('size'), 16)))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)


        # try to figure out the source file if we have debug-info
        dirs = {}
        files = {}
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # note that files contain references to dirs, which we
            # dereference as soon as we see them as each file table follows a
            # dir table
            m = line_pattern.match(line)
            if m:
                if not m.group('dir'):
                    # found a directory entry
                    dirs[int(m.group('no'))] = m.group('path')
                else:
                    # found a file entry
                    dir = int(m.group('dir'))
                    if dir in dirs:
                        files[int(m.group('no'))] = os.path.join(
                            dirs[dir],
                            m.group('path'))
                    else:
                        files[int(m.group('no'))] = m.group('path')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        # map each function symbol to its declaring file via DWARF info
        defs = {}
        is_func = False
        f_name = None
        f_file = None
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # state machine here to find definitions
            m = info_pattern.match(line)
            if m:
                if m.group('tag'):
                    # a new DIE starts; flush the previous subprogram
                    if is_func:
                        defs[f_name] = files.get(f_file, '?')
                    is_func = (m.group('tag') == 'DW_TAG_subprogram')
                elif m.group('name'):
                    f_name = m.group('name')
                elif m.group('file'):
                    f_file = int(m.group('file'))
        # flush the trailing subprogram, if any
        if is_func:
            defs[f_name] = files.get(f_file, '?')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        for r in results_:
            # find best matching debug symbol, this may be slightly different
            # due to optimizations
            if defs:
                # exact match? avoid difflib if we can for speed
                if r.function in defs:
                    file = defs[r.function]
                else:
                    _, file = max(
                        defs.items(),
                        key=lambda d: difflib.SequenceMatcher(None,
                            d[0],
                            r.function, False).ratio())
            else:
                file = r.file

            # ignore filtered sources
            if sources is not None:
                if not any(
                        os.path.abspath(file) == os.path.abspath(s)
                        for s in sources):
                    continue
            else:
                # default to only cwd
                if not everything and not os.path.commonpath([
                        os.getcwd(),
                        os.path.abspath(file)]) == os.getcwd():
                    continue

            # simplify path
            if os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(file)]) == os.getcwd():
                file = os.path.relpath(file)
            else:
                file = os.path.abspath(file)

            results.append(r._replace(file=file))

    return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Merge results that share the same key.

    by selects the key fields (defaults to Result._by); defines is an
    optional list of (field, allowed-values) filters applied first.
    Merging uses the Result type's __add__. Exits on an unknown field.
    """
    if by is None:
        by = Result._by

    # validate requested fields before doing any work
    for k in it.chain(by or [], (k for k, _ in defines or [])):
        if k not in Result._by and k not in Result._fields:
            print("error: could not find field %r?" % k)
            sys.exit(-1)

    # drop results that don't match every define
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # group results by key, preserving first-seen order
    groups = co.OrderedDict()
    for r in results:
        key = tuple(getattr(r, k) for k in by)
        groups.setdefault(key, []).append(r)

    # merge each group with __add__
    folded = []
    for rs in groups.values():
        merged = rs[0]
        for r in rs[1:]:
            merged = merged + r
        folded.append(merged)

    return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    """Render results (optionally diffed against diff_results) as a text table.

    by/fields default to the Result type's _by/_fields; sort is a list of
    (field, reverse) pairs applied stably in reverse order; summary prints
    only the TOTAL row; all keeps unchanged rows in diff mode; percent shows
    only percentage changes instead of old/new/delta columns.
    """
    # the parameter `all` shadows the builtin; stash it and restore the real
    # one (NOTE(review): __builtins__ is a module when run as a script, but a
    # dict inside packages — this relies on script execution)
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name (comma-joined key fields)
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        # biggest ratios (largest changes) first
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        for k, reverse in reversed(sort):
            # k == None/'' means "sort by the type's default sort fields"
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))

    # build up our lines
    lines = []

    # header row: key names plus added/removed counts in diff mode
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
            if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/delta columns
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    def table_entry(name, r, diff_r=None, ratios=[]):
        # render one table row; NOTE(review): ratios=[] is a mutable default,
        # but it is never mutated here so it is harmless
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        # trailing percentage annotation; infinities rendered as +/-∞%
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # skip unchanged rows unless -a/--all
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total row, computed by folding everything into one result
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    """Collect (or load), fold, sort, emit, and print data-size results.

    obj_paths are the *.o files to measure; extra CLI flags arrive in **args
    (use, output, diff, quiet, ...).
    """
    # find sizes: either measure the object files, or load a previous CSV
    if not args.get('use', None):
        results = collect(obj_paths, **args)
    else:
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                # rows missing required fields raise TypeError and are skipped
                try:
                    results.append(DataResult(
                        **{k: r[k] for k in DataResult._by
                            if k in r and r[k].strip()},
                        **{k: r['data_'+k] for k in DataResult._fields
                            if 'data_'+k in r and r['data_'+k].strip()}))
                except TypeError:
                    pass

    # fold
    results = fold(DataResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else DataResult._sort)),
                reverse=reverse ^ (not k or k in DataResult._fields))

    # write results to CSV, key columns bare, data columns 'data_'-prefixed
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else DataResult._by)
                + ['data_'+k for k in (
                    fields if fields is not None else DataResult._fields)])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k) for k in (
                        by if by is not None else DataResult._by)}
                    | {'data_'+k: getattr(r, k) for k in (
                        fields if fields is not None else DataResult._fields)})

    # find previous results? (best-effort: missing diff file is not an error)
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    # ignore rows with no data_ columns at all
                    if not any('data_'+k in r and r['data_'+k].strip()
                            for k in DataResult._fields):
                        continue
                    try:
                        diff_results.append(DataResult(
                            **{k: r[k] for k in DataResult._by
                                if k in r and r[k].strip()},
                            **{k: r['data_'+k] for k in DataResult._fields
                                if 'data_'+k in r and r['data_'+k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            pass

        # fold
        diff_results = fold(DataResult, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(DataResult, results,
            diff_results if args.get('diff') else None,
            by=by if by is not None else ['function'],
            fields=fields,
            sort=sort,
            **args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: parse flags and forward them to main(),
    # dropping unset options so main()'s own defaults apply
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.",
        allow_abbrev=False)
    parser.add_argument(
        'obj_paths',
        nargs='*',
        help="Input *.o files.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=DataResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=DataResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        # parse "field=v1,v2" into (field, {v1, v2})
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    class AppendSort(argparse.Action):
        # accumulate (field, reverse) pairs; -S sorts backwards
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--nm-types',
        default=NM_TYPES,
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %r." % NM_TYPES)
    parser.add_argument(
        '--nm-path',
        type=lambda x: x.split(),
        default=NM_PATH,
        help="Path to the nm executable, may include flags. "
            "Defaults to %r." % NM_PATH)
    parser.add_argument(
        '--objdump-path',
        type=lambda x: x.split(),
        default=OBJDUMP_PATH,
        help="Path to the objdump executable, may include flags. "
            "Defaults to %r." % OBJDUMP_PATH)
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
1344
kernel/fs/littlefs/scripts/perf.py
Executable file
1344
kernel/fs/littlefs/scripts/perf.py
Executable file
File diff suppressed because it is too large
Load Diff
1276
kernel/fs/littlefs/scripts/perfbd.py
Executable file
1276
kernel/fs/littlefs/scripts/perfbd.py
Executable file
File diff suppressed because it is too large
Load Diff
1592
kernel/fs/littlefs/scripts/plot.py
Executable file
1592
kernel/fs/littlefs/scripts/plot.py
Executable file
File diff suppressed because it is too large
Load Diff
1262
kernel/fs/littlefs/scripts/plotmpl.py
Executable file
1262
kernel/fs/littlefs/scripts/plotmpl.py
Executable file
File diff suppressed because it is too large
Load Diff
478
kernel/fs/littlefs/scripts/prettyasserts.py
Executable file
478
kernel/fs/littlefs/scripts/prettyasserts.py
Executable file
@ -0,0 +1,478 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Preprocessor that makes asserts easier to debug.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/prettyasserts.py -p LFS_ASSERT lfs.c -o lfs.a.c
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
import re
import sys
|
||||
|
||||
# NOTE the use of macros here helps keep a consistent stack depth which
|
||||
# tools may rely on.
|
||||
#
|
||||
# If compilation errors are noisy consider using -ftrack-macro-expansion=0.
|
||||
#
|
||||
|
||||
# maximum characters shown when printing strcmp/memcmp operands
LIMIT = 16

# C comparison operators and the macro-name suffix used for each
CMP = {
    '==': 'eq',
    '!=': 'ne',
    '<=': 'le',
    '>=': 'ge',
    '<': 'lt',
    '>': 'gt',
}

# token classes for the lexer; each maps to a list of regex alternatives
LEXEMES = {
    'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
    'assert': ['assert'],
    'arrow': ['=>'],
    # NOTE(review): the char-literal pattern allows exactly one (possibly
    # escaped) character between the quotes — confirm multi-char literals
    # are not expected here
    'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
    'paren': [r'\(', r'\)'],
    'cmp': CMP.keys(),
    'logic': [r'\&\&', r'\|\|'],
    'sep': [':', ';', r'\{', r'\}', ','],
    'op': ['->'], # specifically ops that conflict with cmp
}
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open path, treating '-' as stdin (reads) or stdout (writes)."""
    if path != '-':
        return open(path, mode, buffering)
    # dup the stdio fd so the caller can close the returned file object
    # without closing the real stdin/stdout
    fd = sys.stdin.fileno() if mode == 'r' else sys.stdout.fileno()
    return os.fdopen(os.dup(fd), mode, buffering)
|
||||
|
||||
def write_header(f, limit=LIMIT):
    """Emit the C prelude: print helpers, the fail handler, and one
    __PRETTY_ASSERT_<TYPE>_<CMP> macro per type/comparison pair.

    f must already have a .writeln attribute attached (see main()).
    limit caps how many bytes the mem/str printers show.

    NOTE(review): the indentation inside the emitted C strings was
    reconstructed — it is cosmetic and does not affect the generated
    code's semantics.
    """
    f.writeln("// Generated by %s:" % sys.argv[0])
    f.writeln("//")
    f.writeln("// %s" % ' '.join(sys.argv))
    f.writeln("//")
    f.writeln()

    f.writeln("#include <stdbool.h>")
    f.writeln("#include <stdint.h>")
    f.writeln("#include <inttypes.h>")
    f.writeln("#include <stdio.h>")
    f.writeln("#include <string.h>")
    f.writeln("#include <signal.h>")
    # give source a chance to define feature macros
    f.writeln("#undef _FEATURES_H")
    f.writeln()

    # write print macros, one pretty-printer per operand type
    f.writeln("__attribute__((unused))")
    f.writeln("static void __pretty_assert_print_bool(")
    f.writeln("        const void *v, size_t size) {")
    f.writeln("    (void)size;")
    f.writeln("    printf(\"%s\", *(const bool*)v ? \"true\" : \"false\");")
    f.writeln("}")
    f.writeln()
    f.writeln("__attribute__((unused))")
    f.writeln("static void __pretty_assert_print_int(")
    f.writeln("        const void *v, size_t size) {")
    f.writeln("    (void)size;")
    f.writeln("    printf(\"%\"PRIiMAX, *(const intmax_t*)v);")
    f.writeln("}")
    f.writeln()
    f.writeln("__attribute__((unused))")
    f.writeln("static void __pretty_assert_print_ptr(")
    f.writeln("        const void *v, size_t size) {")
    f.writeln("    (void)size;")
    f.writeln("    printf(\"%p\", v);")
    f.writeln("}")
    f.writeln()
    f.writeln("__attribute__((unused))")
    f.writeln("static void __pretty_assert_print_mem(")
    f.writeln("        const void *v, size_t size) {")
    f.writeln("    const uint8_t *v_ = v;")
    f.writeln("    printf(\"\\\"\");")
    # printable bytes verbatim, others as \xNN, truncated at limit
    f.writeln("    for (size_t i = 0; i < size && i < %d; i++) {" % limit)
    f.writeln("        if (v_[i] >= ' ' && v_[i] <= '~') {")
    f.writeln("            printf(\"%c\", v_[i]);")
    f.writeln("        } else {")
    f.writeln("            printf(\"\\\\x%02x\", v_[i]);")
    f.writeln("        }")
    f.writeln("    }")
    f.writeln("    if (size > %d) {" % limit)
    f.writeln("        printf(\"...\");")
    f.writeln("    }")
    f.writeln("    printf(\"\\\"\");")
    f.writeln("}")
    f.writeln()
    f.writeln("__attribute__((unused))")
    f.writeln("static void __pretty_assert_print_str(")
    f.writeln("        const void *v, size_t size) {")
    f.writeln("    __pretty_assert_print_mem(v, size);")
    f.writeln("}")
    f.writeln()
    # noinline keeps a consistent stack depth for tools (see file header)
    f.writeln("__attribute__((unused, noinline))")
    f.writeln("static void __pretty_assert_fail(")
    f.writeln("        const char *file, int line,")
    f.writeln("        void (*type_print_cb)(const void*, size_t),")
    f.writeln("        const char *cmp,")
    f.writeln("        const void *lh, size_t lsize,")
    f.writeln("        const void *rh, size_t rsize) {")
    f.writeln("    printf(\"%s:%d:assert: assert failed with \", file, line);")
    f.writeln("    type_print_cb(lh, lsize);")
    f.writeln("    printf(\", expected %s \", cmp);")
    f.writeln("    type_print_cb(rh, rsize);")
    f.writeln("    printf(\"\\n\");")
    f.writeln("    fflush(NULL);")
    f.writeln("    raise(SIGABRT);")
    f.writeln("}")
    f.writeln()

    # write assert macros, one per type/comparison pair
    for op, cmp in sorted(CMP.items()):
        f.writeln("#define __PRETTY_ASSERT_BOOL_%s(lh, rh) do { \\"
            % cmp.upper())
        f.writeln("    bool _lh = !!(lh); \\")
        f.writeln("    bool _rh = !!(rh); \\")
        f.writeln("    if (!(_lh %s _rh)) { \\" % op)
        f.writeln("        __pretty_assert_fail( \\")
        f.writeln("                __FILE__, __LINE__, \\")
        f.writeln("                __pretty_assert_print_bool, \"%s\", \\"
            % cmp)
        f.writeln("                &_lh, 0, \\")
        f.writeln("                &_rh, 0); \\")
        f.writeln("    } \\")
        f.writeln("} while (0)")
    for op, cmp in sorted(CMP.items()):
        f.writeln("#define __PRETTY_ASSERT_INT_%s(lh, rh) do { \\"
            % cmp.upper())
        # NOTE(review): _rh deliberately uses __typeof__(lh) so both sides
        # compare in lh's type — confirm this matches upstream intent
        f.writeln("    __typeof__(lh) _lh = lh; \\")
        f.writeln("    __typeof__(lh) _rh = rh; \\")
        f.writeln("    if (!(_lh %s _rh)) { \\" % op)
        f.writeln("        __pretty_assert_fail( \\")
        f.writeln("                __FILE__, __LINE__, \\")
        f.writeln("                __pretty_assert_print_int, \"%s\", \\"
            % cmp)
        f.writeln("                &(intmax_t){_lh}, 0, \\")
        f.writeln("                &(intmax_t){_rh}, 0); \\")
        f.writeln("    } \\")
        f.writeln("} while (0)")
    for op, cmp in sorted(CMP.items()):
        f.writeln("#define __PRETTY_ASSERT_MEM_%s(lh, rh, size) do { \\"
            % cmp.upper())
        f.writeln("    const void *_lh = lh; \\")
        f.writeln("    const void *_rh = rh; \\")
        f.writeln("    if (!(memcmp(_lh, _rh, size) %s 0)) { \\" % op)
        f.writeln("        __pretty_assert_fail( \\")
        f.writeln("                __FILE__, __LINE__, \\")
        f.writeln("                __pretty_assert_print_mem, \"%s\", \\"
            % cmp)
        f.writeln("                _lh, size, \\")
        f.writeln("                _rh, size); \\")
        f.writeln("    } \\")
        f.writeln("} while (0)")
    for op, cmp in sorted(CMP.items()):
        f.writeln("#define __PRETTY_ASSERT_STR_%s(lh, rh) do { \\"
            % cmp.upper())
        f.writeln("    const char *_lh = lh; \\")
        f.writeln("    const char *_rh = rh; \\")
        f.writeln("    if (!(strcmp(_lh, _rh) %s 0)) { \\" % op)
        f.writeln("        __pretty_assert_fail( \\")
        f.writeln("                __FILE__, __LINE__, \\")
        f.writeln("                __pretty_assert_print_str, \"%s\", \\"
            % cmp)
        f.writeln("                _lh, strlen(_lh), \\")
        f.writeln("                _rh, strlen(_rh)); \\")
        f.writeln("    } \\")
        f.writeln("} while (0)")
    for op, cmp in sorted(CMP.items()):
        # Only EQ and NE are supported when compared to NULL.
        if cmp not in ['eq', 'ne']:
            continue
        f.writeln("#define __PRETTY_ASSERT_PTR_%s(lh, rh) do { \\"
            % cmp.upper())
        f.writeln("    const void *_lh = (const void*)(uintptr_t)lh; \\")
        f.writeln("    const void *_rh = (const void*)(uintptr_t)rh; \\")
        f.writeln("    if (!(_lh %s _rh)) { \\" % op)
        f.writeln("        __pretty_assert_fail( \\")
        f.writeln("                __FILE__, __LINE__, \\")
        f.writeln("                __pretty_assert_print_ptr, \"%s\", \\"
            % cmp)
        # NOTE(review): these emit C compound literals of pointer type —
        # confirm against upstream that this is the intended output
        f.writeln("                (const void*){_lh}, 0, \\")
        f.writeln("                (const void*){_rh}, 0); \\")
        f.writeln("    } \\")
        f.writeln("} while (0)")
    f.writeln()
    f.writeln()
|
||||
|
||||
def mkassert(type, cmp, lh, rh, size=None):
    """Render a __PRETTY_ASSERT_<TYPE>_<CMP>(...) macro invocation."""
    macro = "__PRETTY_ASSERT_%s_%s" % (type.upper(), cmp.upper())
    # size is only present for mem-style asserts
    operands = [lh, rh] if size is None else [lh, rh, size]
    return "%s(%s)" % (macro, ", ".join(str(x) for x in operands))
|
||||
|
||||
|
||||
# simple recursive descent parser
|
||||
class ParseFailure(Exception):
    """Raised when the token stream does not match a grammar rule.

    Carries what was expected and the remaining (unconsumed) tokens.
    """
    def __init__(self, expected, found):
        self.expected = expected
        self.found = found

    def __str__(self):
        # truncate the token dump so the message stays readable
        found_repr = repr(self.found)[:70]
        return "expected %r, found %s..." % (self.expected, found_repr)
|
||||
|
||||
class Parser:
    """Backtracking token stream over the whole input.

    Tokenizes the entire input up front; unmatched stretches become
    (None, text, ...) tokens so the original file can be reproduced
    byte-for-byte from the token list.
    """
    def __init__(self, in_f, lexemes=LEXEMES):
        # build one regex with a named group per lexeme class
        p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
            for n, l in lexemes.items())
        p = re.compile(p, re.DOTALL)
        data = in_f.read()
        tokens = []
        # NOTE(review): line/col are recorded on every token but never
        # advanced, so all tokens report line 1, col 0 — confirm whether
        # position tracking was intended
        line = 1
        col = 0
        while True:
            m = p.search(data)
            if m:
                if m.start() > 0:
                    # unmatched prefix, kept verbatim
                    tokens.append((None, data[:m.start()], line, col))
                tokens.append((m.lastgroup, m.group(), line, col))
                data = data[m.end():]
            else:
                # trailing unmatched data
                tokens.append((None, data, line, col))
                break
        self.tokens = tokens
        self.off = 0

    def lookahead(self, *pattern):
        # peek at the next token; matches by lexeme class OR literal text,
        # stashing the matched text in self.m
        if self.off < len(self.tokens):
            token = self.tokens[self.off]
            if token[0] in pattern or token[1] in pattern:
                self.m = token[1]
                return self.m
        self.m = None
        return self.m

    def accept(self, *patterns):
        # consume the next token if it matches, else leave it in place
        m = self.lookahead(*patterns)
        if m is not None:
            self.off += 1
        return m

    def expect(self, *patterns):
        # consume the next token or raise ParseFailure
        m = self.accept(*patterns)
        if not m:
            raise ParseFailure(patterns, self.tokens[self.off:])
        return m

    def push(self):
        # save the current position for backtracking
        return self.off

    def pop(self, state):
        # restore a previously pushed position
        self.off = state
|
||||
|
||||
def p_assert(p):
    """Parse one assert(...) and return the pretty-assert macro text.

    Tries the most specific forms first, backtracking on failure:
    memcmp-compare, strcmp-compare, binary compare (int/ptr), then a
    plain boolean assert. Raises ParseFailure if none match.
    """
    state = p.push()

    # assert(memcmp(a,b,size) cmp 0)?
    try:
        p.expect('assert') ; p.accept('ws')
        p.expect('(') ; p.accept('ws')
        p.expect('memcmp') ; p.accept('ws')
        p.expect('(') ; p.accept('ws')
        lh = p_expr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        rh = p_expr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        size = p_expr(p) ; p.accept('ws')
        p.expect(')') ; p.accept('ws')
        cmp = p.expect('cmp') ; p.accept('ws')
        p.expect('0') ; p.accept('ws')
        p.expect(')')
        return mkassert('mem', CMP[cmp], lh, rh, size)
    except ParseFailure:
        p.pop(state)

    # assert(strcmp(a,b) cmp 0)?
    try:
        p.expect('assert') ; p.accept('ws')
        p.expect('(') ; p.accept('ws')
        p.expect('strcmp') ; p.accept('ws')
        p.expect('(') ; p.accept('ws')
        lh = p_expr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        rh = p_expr(p) ; p.accept('ws')
        p.expect(')') ; p.accept('ws')
        cmp = p.expect('cmp') ; p.accept('ws')
        p.expect('0') ; p.accept('ws')
        p.expect(')')
        return mkassert('str', CMP[cmp], lh, rh)
    except ParseFailure:
        p.pop(state)

    # assert(a cmp b)?
    try:
        p.expect('assert') ; p.accept('ws')
        p.expect('(') ; p.accept('ws')
        lh = p_expr(p) ; p.accept('ws')
        cmp = p.expect('cmp') ; p.accept('ws')
        rh = p_expr(p) ; p.accept('ws')
        p.expect(')')
        # comparisons against NULL get the pointer printer
        if rh == 'NULL' or lh == 'NULL':
            return mkassert('ptr', CMP[cmp], lh, rh)
        return mkassert('int', CMP[cmp], lh, rh)
    except ParseFailure:
        p.pop(state)

    # assert(a)? — fallback, no backtracking: failure propagates to caller
    p.expect('assert') ; p.accept('ws')
    p.expect('(') ; p.accept('ws')
    lh = p_exprs(p) ; p.accept('ws')
    p.expect(')')
    return mkassert('bool', 'eq', lh, 'true')
|
||||
|
||||
def p_expr(p):
    """Parse a single expression, returning its (possibly rewritten) text.

    Consumes parenthesized groups, nested asserts (rewritten recursively),
    strings, ops, and whitespace; stops at commas, comparisons, and other
    separators, which belong to the caller.
    """
    res = []
    while True:
        if p.accept('('):
            # parenthesized group; may contain 'sep'-separated sub-exprs
            res.append(p.m)
            while True:
                res.append(p_exprs(p))
                if p.accept('sep'):
                    res.append(p.m)
                else:
                    break
            res.append(p.expect(')'))
        elif p.lookahead('assert'):
            # nested assert — rewrite it; on failure keep the bare token
            state = p.push()
            try:
                res.append(p_assert(p))
            except ParseFailure:
                p.pop(state)
                res.append(p.expect('assert'))
        elif p.accept('string', 'op', 'ws', None):
            # strings, ->, whitespace, and unmatched text pass through
            res.append(p.m)
        else:
            return ''.join(res)
|
||||
|
||||
def p_exprs(p):
    """Parse exprs joined by comparison/logic operators or commas."""
    parts = [p_expr(p)]
    # keep consuming "<joiner> <expr>" pairs while a joining token follows
    while p.accept('cmp', 'logic', ','):
        parts.append(p.m)
        parts.append(p_expr(p))
    return ''.join(parts)
|
||||
|
||||
def p_stmt(p):
    """Parse one statement, rewriting "=> expected" test shorthands.

    Handles memcmp(...)=>0, strcmp(...)=>0, and the generic lh=>rh form;
    anything else is returned unchanged. Leading whitespace is preserved.
    """
    ws = p.accept('ws') or ''

    # memcmp(lh,rh,size) => 0?
    if p.lookahead('memcmp'):
        state = p.push()
        try:
            p.expect('memcmp') ; p.accept('ws')
            p.expect('(') ; p.accept('ws')
            lh = p_expr(p) ; p.accept('ws')
            p.expect(',') ; p.accept('ws')
            rh = p_expr(p) ; p.accept('ws')
            p.expect(',') ; p.accept('ws')
            size = p_expr(p) ; p.accept('ws')
            p.expect(')') ; p.accept('ws')
            p.expect('=>') ; p.accept('ws')
            p.expect('0') ; p.accept('ws')
            return ws + mkassert('mem', 'eq', lh, rh, size)
        except ParseFailure:
            p.pop(state)

    # strcmp(lh,rh) => 0?
    if p.lookahead('strcmp'):
        state = p.push()
        try:
            p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
            lh = p_expr(p) ; p.accept('ws')
            p.expect(',') ; p.accept('ws')
            rh = p_expr(p) ; p.accept('ws')
            p.expect(')') ; p.accept('ws')
            p.expect('=>') ; p.accept('ws')
            p.expect('0') ; p.accept('ws')
            return ws + mkassert('str', 'eq', lh, rh)
        except ParseFailure:
            p.pop(state)

    # lh => rh?  (generic equality shorthand; otherwise pass through)
    lh = p_exprs(p)
    if p.accept('=>'):
        rh = p_exprs(p)
        return ws + mkassert('int', 'eq', lh, rh)
    else:
        return ws + lh
|
||||
|
||||
def main(input=None, output=None, pattern=None, limit=LIMIT):
    """Rewrite asserts in a C file into pretty, self-describing asserts.

    input/output default to stdin/stdout ('-'); pattern is a list of extra
    regexes treated as assert keywords; limit caps operand display length.
    """
    with openio(input or '-', 'r') as in_f:
        # create parser
        # NOTE: LEXEMES.copy() is shallow, so the 'assert' list must be
        # rebuilt (not +='d) or extra patterns would accumulate in the
        # module-level LEXEMES across calls; pattern=None also avoids the
        # mutable-default-argument pitfall of the old pattern=[]
        lexemes = LEXEMES.copy()
        lexemes['assert'] = list(lexemes['assert']) + list(pattern or [])
        p = Parser(in_f, lexemes)

    with openio(output or '-', 'w') as f:
        # attach a writeln helper used by write_header
        def writeln(s=''):
            f.write(s)
            f.write('\n')
        f.writeln = writeln

        # write extra verbose asserts
        write_header(f, limit=limit)
        if input is not None:
            # point compiler diagnostics back at the original file
            f.writeln("#line %d \"%s\"" % (1, input))

        # parse and write out stmt at a time
        try:
            while True:
                f.write(p_stmt(p))
                if p.accept('sep'):
                    f.write(p.m)
                else:
                    break
        except ParseFailure as e:
            # best-effort: warn and fall through to emit the rest verbatim
            print('warning: %s' % e)
            pass

        # flush any remaining tokens unchanged
        for i in range(p.off, len(p.tokens)):
            f.write(p.tokens[i][1])
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: parse flags and forward them to main(),
    # dropping unset options so main()'s own defaults apply
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Preprocessor that makes asserts easier to debug.",
        allow_abbrev=False)
    parser.add_argument(
        'input',
        help="Input C file.")
    parser.add_argument(
        '-o', '--output',
        required=True,
        help="Output C file.")
    parser.add_argument(
        '-p', '--pattern',
        action='append',
        help="Regex patterns to search for starting an assert statement. This"
            " implicitly includes \"assert\" and \"=>\".")
    parser.add_argument(
        '-l', '--limit',
        type=lambda x: int(x, 0),
        default=LIMIT,
        help="Maximum number of characters to display in strcmp and memcmp. "
            "Defaults to %r." % LIMIT)
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
26
kernel/fs/littlefs/scripts/readblock.py
Executable file
26
kernel/fs/littlefs/scripts/readblock.py
Executable file
@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import subprocess as sp
|
||||
|
||||
def main(args):
    """Hex dump one block of a disk image by piping it through xxd."""
    # read the requested block, padding with 0xff if the image is short
    with open(args.disk, 'rb') as disk:
        disk.seek(args.block * args.block_size)
        raw = disk.read(args.block_size)
    block = raw.ljust(args.block_size, b'\xff')

    # what did you expect?
    print("%-8s %-s" % ('off', 'data'))
    return sp.run(['xxd', '-g1', '-'], input=block).returncode
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point; sizes/addresses accept 0x-prefixed values
    # via int(x, 0)
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Hex dump a specific block in a disk.")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block', type=lambda x: int(x, 0),
        help="Address of block to dump.")
    sys.exit(main(parser.parse_args()))
|
399
kernel/fs/littlefs/scripts/readmdir.py
Executable file
399
kernel/fs/littlefs/scripts/readmdir.py
Executable file
@ -0,0 +1,399 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import binascii
|
||||
import sys
|
||||
import itertools as it
|
||||
|
||||
# littlefs metadata tag types: name -> (mask, value); a tag matches a
# named type when (tag.type & mask) == value (see Tag.is_)
TAG_TYPES = {
    'splice': (0x700, 0x400),
    'create': (0x7ff, 0x401),
    'delete': (0x7ff, 0x4ff),
    'name': (0x700, 0x000),
    'reg': (0x7ff, 0x001),
    'dir': (0x7ff, 0x002),
    'superblock': (0x7ff, 0x0ff),
    'struct': (0x700, 0x200),
    'dirstruct': (0x7ff, 0x200),
    'ctzstruct': (0x7ff, 0x202),
    'inlinestruct': (0x7ff, 0x201),
    'userattr': (0x700, 0x300),
    'tail': (0x700, 0x600),
    'softtail': (0x7ff, 0x600),
    'hardtail': (0x7ff, 0x601),
    'gstate': (0x700, 0x700),
    'movestate': (0x7ff, 0x7ff),
    'crc': (0x700, 0x500),
    'ccrc': (0x780, 0x500),
    'fcrc': (0x7ff, 0x5ff),
}
|
||||
|
||||
class Tag:
|
||||
def __init__(self, *args):
|
||||
if len(args) == 1:
|
||||
self.tag = args[0]
|
||||
elif len(args) == 3:
|
||||
if isinstance(args[0], str):
|
||||
type = TAG_TYPES[args[0]][1]
|
||||
else:
|
||||
type = args[0]
|
||||
|
||||
if isinstance(args[1], str):
|
||||
id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
|
||||
else:
|
||||
id = args[1]
|
||||
|
||||
if isinstance(args[2], str):
|
||||
size = int(args[2], str) if args[2] not in 'x.' else 0x3ff
|
||||
else:
|
||||
size = args[2]
|
||||
|
||||
self.tag = (type << 20) | (id << 10) | size
|
||||
else:
|
||||
assert False
|
||||
|
||||
@property
|
||||
def isvalid(self):
|
||||
return not bool(self.tag & 0x80000000)
|
||||
|
||||
@property
|
||||
def isattr(self):
|
||||
return not bool(self.tag & 0x40000000)
|
||||
|
||||
@property
|
||||
def iscompactable(self):
|
||||
return bool(self.tag & 0x20000000)
|
||||
|
||||
@property
|
||||
def isunique(self):
|
||||
return not bool(self.tag & 0x10000000)
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def type1(self):
|
||||
return (self.tag & 0x70000000) >> 20
|
||||
|
||||
@property
|
||||
def type3(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return (self.tag & 0x000ffc00) >> 10
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
return (self.tag & 0x000003ff) >> 0
|
||||
|
||||
@property
|
||||
def dsize(self):
|
||||
return 4 + (self.size if self.size != 0x3ff else 0)
|
||||
|
||||
@property
|
||||
def chunk(self):
|
||||
return self.type & 0xff
|
||||
|
||||
@property
|
||||
def schunk(self):
|
||||
return struct.unpack('b', struct.pack('B', self.chunk))[0]
|
||||
|
||||
def is_(self, type):
|
||||
try:
|
||||
if ' ' in type:
|
||||
type1, type3 = type.split()
|
||||
return (self.is_(type1) and
|
||||
(self.type & ~TAG_TYPES[type1][0]) == int(type3, 0))
|
||||
|
||||
return self.type == int(type, 0)
|
||||
|
||||
except (ValueError, KeyError):
|
||||
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
|
||||
|
||||
def mkmask(self):
|
||||
return Tag(
|
||||
0x700 if self.isunique else 0x7ff,
|
||||
0x3ff if self.isattr else 0,
|
||||
0)
|
||||
|
||||
def chid(self, nid):
    """Copy this tag with a different id, carrying over any parse
    metadata (offset, payload, crc, erased flag) that was attached."""
    ntag = Tag(self.type, nid, self.size)
    if hasattr(self, 'off'): ntag.off = self.off
    if hasattr(self, 'data'): ntag.data = self.data
    # fix: the parser stores the running crc as 'crc' (tag.crc = crc);
    # this previously tested the nonexistent 'ccrc' attribute, so the
    # crc was never propagated to re-id'd tags
    if hasattr(self, 'crc'): ntag.crc = self.crc
    if hasattr(self, 'erased'): ntag.erased = self.erased
    return ntag
|
||||
|
||||
def typerepr(self):
    """Render the type as a human-readable name (via TAG_TYPES) plus a
    crc status suffix: ' (bad)' for failed commit crcs, ' (era)' for
    fcrc-verified erased regions."""
    # fix: the parser stores the running crc as 'crc' (tag.crc = crc);
    # reading 'ccrc' here meant bad commits were never flagged
    if (self.is_('ccrc')
            and getattr(self, 'crc', 0xffffffff) != 0xffffffff):
        crc_status = ' (bad)'
    elif self.is_('fcrc') and getattr(self, 'erased', False):
        crc_status = ' (era)'
    else:
        crc_status = ''

    # find the longest-prefix named type, allowing up to 11 low bits to
    # be rendered as a numeric suffix
    reverse_types = {v: k for k, v in TAG_TYPES.items()}
    for prefix in range(12):
        mask = 0x7ff & ~((1 << prefix)-1)
        if (mask, self.type & mask) in reverse_types:
            type = reverse_types[mask, self.type & mask]
            if prefix > 0:
                return '%s %#x%s' % (
                    type, self.type & ((1 << prefix)-1), crc_status)
            else:
                return '%s%s' % (type, crc_status)
    else:
        # no name matches at all, fall back to raw hex
        return '%02x%s' % (self.type, crc_status)
|
||||
|
||||
def idrepr(self):
    """Render the id, or '.' for the no-id sentinel 0x3ff."""
    if self.id == 0x3ff:
        return '.'
    return repr(self.id)
|
||||
|
||||
def sizerepr(self):
    """Render the size, or 'x' for the no-payload sentinel 0x3ff."""
    if self.size == 0x3ff:
        return 'x'
    return repr(self.size)
|
||||
|
||||
def __repr__(self):
    """Debug form: Tag('<type>', id, size)."""
    trepr = self.typerepr()
    return 'Tag(%r, %d, %d)' % (trepr, self.id, self.size)
|
||||
|
||||
def __lt__(self, other):
    """Order tags by (id, type)."""
    skey = (self.id, self.type)
    okey = (other.id, other.type)
    return skey < okey
|
||||
|
||||
def __bool__(self):
    # a tag is truthy iff its valid bit is clear (see isvalid)
    return self.isvalid
|
||||
|
||||
def __int__(self):
    # expose the raw 32-bit tag value
    return self.tag
|
||||
|
||||
def __index__(self):
    # lets a Tag be used directly where an int is required (hex(), %x)
    return self.tag
|
||||
|
||||
class MetadataPair:
    """One littlefs metadata pair: a logged mdir stored in one or two
    blocks.

    Given two blocks, each is parsed independently and the state of the
    most recent valid one (self.pair[0]) is adopted; given one block,
    its commit log is parsed directly.
    """
    def __init__(self, blocks):
        if len(blocks) > 1:
            # parse each block separately, then mirror the winner's state
            self.pair = [MetadataPair([block]) for block in blocks]
            self.pair = sorted(self.pair, reverse=True)

            self.data = self.pair[0].data
            self.rev = self.pair[0].rev
            self.tags = self.pair[0].tags
            self.ids = self.pair[0].ids
            self.log = self.pair[0].log
            self.all_ = self.pair[0].all_
            return

        self.pair = [self]
        self.data = blocks[0]
        block = self.data

        # the revision count lives in the first 4 bytes and seeds the crc
        self.rev, = struct.unpack('<I', block[0:4])
        crc = binascii.crc32(block[0:4])
        fcrctag = None
        fcrcdata = None

        # parse tags
        corrupt = False
        tag = Tag(0xffffffff)
        off = 4
        self.log = []   # tags in valid commits only
        self.all_ = []  # every parsed tag, valid or not
        while len(block) - off >= 4:
            # each tag is stored xored with the previous tag
            ntag, = struct.unpack('>I', block[off:off+4])

            tag = Tag((int(tag) ^ ntag) & 0x7fffffff)
            tag.off = off + 4
            tag.data = block[off+4:off+tag.dsize]
            if tag.is_('ccrc'):
                # a commit crc only contributes its own 8 bytes
                crc = binascii.crc32(block[off:off+2*4], crc)
            else:
                crc = binascii.crc32(block[off:off+tag.dsize], crc)
            tag.crc = crc
            off += tag.dsize

            self.all_.append(tag)

            if tag.is_('fcrc') and len(tag.data) == 8:
                # fcrc records (size, crc) of the erased region that
                # should follow the commit
                fcrctag = tag
                fcrcdata = struct.unpack('<II', tag.data)
            elif tag.is_('ccrc'):
                # is valid commit?
                if crc != 0xffffffff:
                    corrupt = True
                if not corrupt:
                    self.log = self.all_.copy()
                    # end of commit?
                    if fcrcdata:
                        fcrcsize, fcrc = fcrcdata
                        fcrc_ = 0xffffffff ^ binascii.crc32(
                            block[off:off+fcrcsize])
                        if fcrc_ == fcrc:
                            fcrctag.erased = True
                            corrupt = True

                # reset tag parsing
                crc = 0
                tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
                fcrctag = None
                fcrcdata = None

        # find active ids
        self.ids = list(it.takewhile(
            lambda id: Tag('name', id, 0) in self,
            it.count()))

        # find most recent tags
        self.tags = []
        for tag in self.log:
            if tag.is_('crc') or tag.is_('splice'):
                continue
            elif tag.id == 0x3ff:
                # id-less tag: current iff it is the latest match
                if tag in self and self[tag] is tag:
                    self.tags.append(tag)
            else:
                # id could have change, I know this is messy and slow
                # but it works
                for id in self.ids:
                    ntag = tag.chid(id)
                    if ntag in self and self[ntag] is tag:
                        self.tags.append(ntag)

        self.tags = sorted(self.tags)

    def __bool__(self):
        # an mdir with no valid commits is falsy (corrupt)
        return bool(self.log)

    def __lt__(self, other):
        # corrupt blocks don't count
        if not self or not other:
            return bool(other)

        # use sequence arithmetic to avoid overflow
        return not ((other.rev - self.rev) & 0x80000000)

    def __contains__(self, args):
        # membership mirrors __getitem__ semantics
        try:
            self[args]
            return True
        except KeyError:
            return False

    def __getitem__(self, args):
        """Find the most recent tag matching (mask, tag), accounting for
        id shifts caused by splice (create/delete) tags.

        Raises KeyError if the tag was deleted or never written.
        """
        if isinstance(args, tuple):
            gmask, gtag = args
        else:
            gmask, gtag = args.mkmask(), args

        gdiff = 0
        for tag in reversed(self.log):
            if (gmask.id != 0 and tag.is_('splice') and
                    tag.id <= gtag.id - gdiff):
                if tag.is_('create') and tag.id == gtag.id - gdiff:
                    # creation point
                    break

                # accumulate id adjustment from creates/deletes
                gdiff += tag.schunk

            if ((int(gmask) & int(tag)) ==
                    (int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
                if tag.size == 0x3ff:
                    # deleted
                    break

                return tag

        raise KeyError(gmask, gtag)

    def _dump_tags(self, tags, f=sys.stdout, truncate=True):
        # shared tag-dump formatter; header first
        f.write("%-8s %-8s %-13s %4s %4s" % (
            'off', 'tag', 'type', 'id', 'len'))
        if truncate:
            f.write(' data (truncated)')
        f.write('\n')

        for tag in tags:
            f.write("%08x: %08x %-14s %3s %4s" % (
                tag.off, tag,
                tag.typerepr(), tag.idrepr(), tag.sizerepr()))
            if truncate:
                # hex + printable-ascii preview of the first 8 bytes
                f.write(" %-23s %-8s\n" % (
                    ' '.join('%02x' % c for c in tag.data[:8]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, tag.data[:8]))))
            else:
                # full hexdump, 16 bytes per line
                f.write("\n")
                for i in range(0, len(tag.data), 16):
                    f.write(" %08x: %-47s %-16s\n" % (
                        tag.off+i,
                        ' '.join('%02x' % c for c in tag.data[i:i+16]),
                        ''.join(c if c >= ' ' and c <= '~' else '.'
                            for c in map(chr, tag.data[i:i+16]))))

    def dump_tags(self, f=sys.stdout, truncate=True):
        # only the currently-active tags
        self._dump_tags(self.tags, f=f, truncate=truncate)

    def dump_log(self, f=sys.stdout, truncate=True):
        # tags in valid commits
        self._dump_tags(self.log, f=f, truncate=truncate)

    def dump_all(self, f=sys.stdout, truncate=True):
        # every tag, including those in corrupted commits
        self._dump_tags(self.all_, f=f, truncate=truncate)
|
||||
|
||||
def main(args):
    """Dump a single littlefs metadata pair from a disk image.

    Returns 0 if the mdir parses cleanly, 1 if it is corrupted.
    """
    # read the requested block(s), padding short reads with 0xff
    # (the erased-flash fill value)
    blocks = []
    with open(args.disk, 'rb') as f:
        for block in [args.block1, args.block2]:
            if block is None:
                continue
            f.seek(block * args.block_size)
            blocks.append(f.read(args.block_size)
                .ljust(args.block_size, b'\xff'))

    # find most recent pair
    mdir = MetadataPair(blocks)

    # grab the tail pointer if present; an all-0xff payload means none
    try:
        mdir.tail = mdir[Tag('tail', 0, 0)]
        if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
            mdir.tail = None
    except KeyError:
        mdir.tail = None

    print("mdir {%s} rev %d%s%s%s" % (
        ', '.join('%#x' % b
            for b in [args.block1, args.block2]
            if b is not None),
        mdir.rev,
        ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
        if len(mdir.pair) > 1 else '',
        ' (corrupted!)' if not mdir else '',
        ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
        if mdir.tail else ''))
    # -a shows everything, -l shows the commit log, default shows
    # only active tags
    if args.all:
        mdir.dump_all(truncate=not args.no_truncate)
    elif args.log:
        mdir.dump_log(truncate=not args.no_truncate)
    else:
        mdir.dump_tags(truncate=not args.no_truncate)

    return 0 if mdir else 1
|
||||
|
||||
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump useful info about metadata pairs in littlefs.")
    # numeric arguments accept any base via int(x, 0)
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', type=lambda x: int(x, 0),
        help="First block address for finding the metadata pair.")
    parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0),
        help="Second block address for finding the metadata pair.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, included tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Don't truncate large amounts of data.")
    # exit code propagates main's 0/1 parse status
    sys.exit(main(parser.parse_args()))
|
183
kernel/fs/littlefs/scripts/readtree.py
Executable file
183
kernel/fs/littlefs/scripts/readtree.py
Executable file
@ -0,0 +1,183 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import sys
|
||||
import json
|
||||
import io
|
||||
import itertools as it
|
||||
from readmdir import Tag, MetadataPair
|
||||
|
||||
def main(args):
    """Walk the littlefs metadata tree starting at the superblock pair
    and dump each directory's mdirs.

    Returns 0 on success, 1 if any mdir is corrupted, 2 if a tail cycle
    is detected.
    """
    superblock = None
    gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
    dirs = []       # completed directories (lists of mdirs)
    mdirs = []      # mdirs of the directory currently being walked
    corrupted = []
    cycle = False
    with open(args.disk, 'rb') as f:
        tail = (args.block1, args.block2)
        hard = False
        while True:
            # refuse to loop forever if a tail points back at an mdir
            # we've already visited
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            # map each parsed half back to its block address
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as a we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                # global state is xored across all mdirs
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a,b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # corrupted?
            if not mdir:
                corrupted.append(mdir)

            # add to directories; a hardtail continues the same
            # directory, anything else closes it
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[frozenset(dir[0].blocks)] = dir

    # breadth-first walk from the root, naming each directory by the
    # dir entries that reference it
    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    try:
                        npath = tag.data.decode('utf8')
                        dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                        nblocks = struct.unpack('<II', dirstruct.data)
                        nmdir = dirtable[frozenset(nblocks)]
                        pending.append(((path + '/' + npath), nmdir))
                    except KeyError:
                        pass

        dir[0].path = path.replace('//', '/')

    # print littlefs + version info
    version = ('?', '?')
    if superblock:
        version = tuple(reversed(
            struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
    print("%-47s%s" % ("littlefs v%s.%s" % version,
        "data (truncated, if it fits)"
        if not any([args.no_truncate, args.log, args.all]) else ""))

    # print gstate
    print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
    tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
    blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
    if tag.size or not tag.isvalid:
        print(" orphans >=%d" % max(tag.size, 1))
    if tag.type:
        print(" move dir {%#x, %#x} id %d" % (
            blocks[0], blocks[1], tag.id))

    # print mdir info
    for i, dir in enumerate(dirs):
        print("dir %s" % (json.dumps(dir[0].path)
            if hasattr(dir[0], 'path') else '(orphan)'))

        for j, mdir in enumerate(dir):
            print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
                mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
                ' (corrupted!)' if not mdir else '',
                ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
                if mdir.tail else ''))

            # capture the dump so each line can be prefixed with a
            # tree-drawing gutter character
            f = io.StringIO()
            if args.log:
                mdir.dump_log(f, truncate=not args.no_truncate)
            elif args.all:
                mdir.dump_all(f, truncate=not args.no_truncate)
            else:
                mdir.dump_tags(f, truncate=not args.no_truncate)

            lines = list(filter(None, f.getvalue().split('\n')))
            for k, line in enumerate(lines):
                print("%s %s" % (
                    ' ' if j == len(dir)-1 else
                    'v' if k == len(lines)-1 else
                    '|',
                    line))

    errcode = 0
    for mdir in corrupted:
        errcode = errcode or 1
        print("*** corrupted mdir {%#x, %#x}! ***" % (
            mdir.blocks[0], mdir.blocks[1]))

    if cycle:
        errcode = errcode or 2
        print("*** cycle detected {%#x, %#x}! ***" % (
            cycle[0], cycle[1]))

    return errcode
|
||||
|
||||
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump semantic info about the metadata tree in littlefs")
    # numeric arguments accept any base via int(x, 0); block1/block2
    # default to the standard superblock location {0, 1}
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', nargs='?', default=0,
        type=lambda x: int(x, 0),
        help="Optional first block address for finding the superblock.")
    parser.add_argument('block2', nargs='?', default=1,
        type=lambda x: int(x, 0),
        help="Optional second block address for finding the superblock.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, included tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Show the full contents of files/attrs/tags.")
    # exit code propagates main's 0/1/2 status
    sys.exit(main(parser.parse_args()))
|
735
kernel/fs/littlefs/scripts/stack.py
Executable file
735
kernel/fs/littlefs/scripts/stack.py
Executable file
@ -0,0 +1,735 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find stack usage at the function level. Will detect recursion and
|
||||
# report as infinite stack usage.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/stack.py lfs.ci lfs_util.ci -Slimit
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
    """An integer result field that also models +/-infinity (used for
    unbounded recursion in stack limits).

    Accepts ints, other Ints, or strings in any int base plus the
    spellings '∞'/'inf' (optionally signed), and provides the
    table/diff formatting hooks used by the report printer.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # fix: raw strings so \s stays a regex escape rather
                # than an invalid (SyntaxWarning) string escape
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        # only finite values can be converted to a true int
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    # fixed-width renderings for table cells
    none = '%7s' % '-'
    def table(self):
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        """Render the signed difference self - other (either may be None)."""
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        """Relative change (new-old)/old, with infinities mapped to +/-inf."""
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# size results
|
||||
class StackResult(co.namedtuple('StackResult', [
        'file', 'function', 'frame', 'limit', 'children'])):
    """A single stack-usage record: a function's own frame size, its
    worst-case call-chain limit, and its set of (file, function) callees."""
    _by = ['file', 'function']
    _fields = ['frame', 'limit']
    _sort = ['limit', 'frame']
    _types = {'frame': Int, 'limit': Int}

    __slots__ = ()
    def __new__(cls, file='', function='',
            frame=0, limit=0, children=frozenset()):
        # fix: frozenset() default instead of the mutable set() -- a
        # shared mutable default is a latent aliasing hazard (children
        # are only read/unioned, so this is behavior-compatible)
        return super().__new__(cls, file, function,
            Int(frame), Int(limit),
            children)

    def __add__(self, other):
        # folding sums frames, keeps the worst-case limit, and unions
        # the callee sets
        return StackResult(self.file, self.function,
            self.frame + other.frame,
            max(self.limit, other.limit),
            self.children | other.children)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open a file, treating '-' as stdin (read) or stdout (write).

    The dup() keeps the process's own stream usable after the returned
    file object is closed.
    """
    if path != '-':
        return open(path, mode, buffering)
    if mode == 'r':
        fd = os.dup(sys.stdin.fileno())
    else:
        fd = os.dup(sys.stdout.fileno())
    return os.fdopen(fd, mode, buffering)
|
||||
|
||||
def collect(ci_paths, *,
        sources=None,
        everything=False,
        **args):
    """Parse GCC callgraph-info (.ci, VCG format) files into a list of
    StackResults, one per surviving function.

    sources limits results to the given source files; everything keeps
    internal (__-prefixed) functions and files outside the cwd.
    """
    # parse the vcg format
    k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
    v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
    def parse_vcg(rest):
        # inner worker returns (node, remaining-text); the outer wrapper
        # asserts the whole input was consumed
        def parse_vcg(rest):
            node = []
            while True:
                rest = rest.lstrip()
                m_ = k_pattern.match(rest)
                if not m_:
                    return (node, rest)
                k, rest = m_.group(1), rest[m_.end(0):]

                rest = rest.lstrip()
                if rest.startswith('{'):
                    # nested { ... } value
                    v, rest = parse_vcg(rest[1:])
                    assert rest[0] == '}', "unexpected %r" % rest[0:1]
                    rest = rest[1:]
                    node.append((k, v))
                else:
                    # quoted-string or bare-word value
                    m_ = v_pattern.match(rest)
                    assert m_, "unexpected %r" % rest[0:1]
                    v, rest = m_.group(1) or m_.group(2), rest[m_.end(0):]
                    node.append((k, v))

        node, rest = parse_vcg(rest)
        assert rest == '', "unexpected %r" % rest[0:1]
        return node

    # collect into functions
    # callgraph: title -> (file, function, frame, callee titles)
    callgraph = co.defaultdict(lambda: (None, None, 0, set()))
    f_pattern = re.compile(
        r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
    for path in ci_paths:
        with open(path) as f:
            vcg = parse_vcg(f.read())
        for k, graph in vcg:
            if k != 'graph':
                continue
            for k, info in graph:
                if k == 'node':
                    info = dict(info)
                    m_ = f_pattern.match(info['label'])
                    if m_:
                        function, file, size, type = m_.groups()
                        # non-static stack usage means gcc couldn't
                        # bound this frame -- warn unless quieted
                        if (not args.get('quiet')
                                and 'static' not in type
                                and 'bounded' not in type):
                            print("warning: "
                                "found non-static stack for %s (%s, %s)" % (
                                function, type, size))
                        _, _, _, targets = callgraph[info['title']]
                        callgraph[info['title']] = (
                            file, function, int(size), targets)
                elif k == 'edge':
                    info = dict(info)
                    _, _, _, targets = callgraph[info['sourcename']]
                    targets.add(info['targetname'])
                else:
                    continue

    callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
    for source, (s_file, s_function, frame, targets) in callgraph.items():
        # discard internal functions
        if not everything and s_function.startswith('__'):
            continue
        # ignore filtered sources
        if sources is not None:
            if not any(
                    os.path.abspath(s_file) == os.path.abspath(s)
                    for s in sources):
                continue
        else:
            # default to only cwd
            if not everything and not os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(s_file)]) == os.getcwd():
                continue

        # simplify path
        if os.path.commonpath([
                os.getcwd(),
                os.path.abspath(s_file)]) == os.getcwd():
            s_file = os.path.relpath(s_file)
        else:
            s_file = os.path.abspath(s_file)

        callgraph_[source] = (s_file, s_function, frame, targets)
    callgraph = callgraph_

    # second filtering pass, re-applied against the simplified paths
    if not everything:
        callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
        for source, (s_file, s_function, frame, targets) in callgraph.items():
            # discard filtered sources
            if sources is not None and not any(
                    os.path.abspath(s_file) == os.path.abspath(s)
                    for s in sources):
                continue
            # discard internal functions
            if s_function.startswith('__'):
                continue
            callgraph_[source] = (s_file, s_function, frame, targets)
        callgraph = callgraph_

    # find maximum stack size recursively, this requires also detecting cycles
    # (in case of recursion)
    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in callgraph:
            return 0
        _, _, frame, targets = callgraph[source]

        limit = 0
        for target in targets:
            if target in seen:
                # found a cycle
                return m.inf
            limit_ = find_limit(target, seen | {target})
            limit = max(limit, limit_)

        return frame + limit

    # resolve callee titles into (file, function) pairs we know about
    def find_children(targets):
        children = set()
        for target in targets:
            if target in callgraph:
                t_file, t_function, _, _ = callgraph[target]
                children.add((t_file, t_function))
        return children

    # build results
    results = []
    for source, (s_file, s_function, frame, targets) in callgraph.items():
        limit = find_limit(source)
        children = find_children(targets)
        results.append(StackResult(s_file, s_function, frame, limit, children))

    return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Fold results together, merging entries that share the same `by`
    fields via Result.__add__, optionally filtering by `defines`
    (a list of (field, allowed-values) pairs) first.
    """
    if by is None:
        by = Result._by

    # every requested key must be a real field of this Result type
    for key in it.chain(by or [], (k for k, _ in defines or [])):
        if key not in Result._by and key not in Result._fields:
            print("error: could not find field %r?" % key)
            sys.exit(-1)

    # filter by matching defines
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # group by name, preserving first-seen order
    groups = co.defaultdict(list)
    for r in results:
        groups[tuple(getattr(r, k) for k in by)].append(r)

    # merge each group with Result.__add__
    return [sum(rs[1:], start=rs[0]) for rs in groups.values()]
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        tree=False,
        depth=1,
        **_):
    """Print results as a table, optionally diffed against diff_results
    and/or rendered as a call tree down to `depth` levels.
    """
    # the `all` flag shadows the builtin; stash it and restore all()
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        # biggest regressions first
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        for k, reverse in reversed(sort):
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))


    # build up our lines
    lines = []

    # header
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
        if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/delta column triples for full diffs
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    def table_entry(name, r, diff_r=None, ratios=[]):
        # render one row; trailing element is the ratio annotation
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # unless -a, drop rows with no change
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # adjust the name width based on the expected call depth, though
    # note this doesn't really work with unbounded recursion
    if not summary and not m.isinf(depth):
        widths[0] += 4*(depth-1)

    # print the tree recursively
    if not tree:
        print('%-*s %s%s' % (
            widths[0], lines[0][0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], lines[0][1:-1])),
            lines[0][-1]))

    if not summary:
        line_table = {n: l for n, l in zip(names, lines[1:-1])}

        def recurse(names_, depth_, prefixes=('', '', '', '')):
            # prefixes: (mid-entry, last-entry, mid-continuation,
            # last-continuation) gutter strings
            for i, name in enumerate(names_):
                if name not in line_table:
                    continue
                line = line_table[name]
                is_last = (i == len(names_)-1)

                print('%s%-*s ' % (
                    prefixes[0+is_last],
                    widths[0] - (
                        len(prefixes[0+is_last])
                        if not m.isinf(depth) else 0),
                    line[0]),
                    end='')
                if not tree:
                    print(' %s%s' % (
                        ' '.join('%*s' % (w, x)
                            for w, x in zip(widths[1:], line[1:-1])),
                        line[-1]),
                        end='')
                print()

                # recurse?
                if name in table and depth_ > 1:
                    children = {
                        ','.join(str(getattr(Result(*c), k) or '') for k in by)
                        for c in table[name].children}
                    recurse(
                        # note we're maintaining sort order
                        [n for n in names if n in children],
                        depth_-1,
                        (prefixes[2+is_last] + "|-> ",
                            prefixes[2+is_last] + "'-> ",
                            prefixes[2+is_last] + "| ",
                            prefixes[2+is_last] + " "))

        recurse(names, depth)

    if not tree:
        print('%-*s %s%s' % (
            widths[0], lines[-1][0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], lines[-1][1:-1])),
            lines[-1][-1]))
|
||||
|
||||
|
||||
def main(ci_paths,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    """Find stack usage per function and report it as a table/CSV/call tree.

    ci_paths -- input *.ci callgraph files (or use args['use'] to read a
        previously written CSV instead of parsing)
    by       -- fields to group results by, defaults to StackResult._by
    fields   -- result fields to show, defaults to StackResult._fields
    defines  -- (field, values) filters applied while folding
    sort     -- list of (field, reverse) pairs applied in order
    args     -- remaining CLI options (use/output/diff/quiet/tree/depth/...)

    Exits with status 2 when error_on_recursion is set and any function's
    stack limit is unbounded.
    """
    # it doesn't really make sense to not have a depth with tree,
    # so assume depth=inf if tree by default
    if args.get('depth') is None:
        args['depth'] = m.inf if args['tree'] else 1
    elif args.get('depth') == 0:
        args['depth'] = m.inf

    # find sizes
    if not args.get('use', None):
        results = collect(ci_paths, **args)
    else:
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                # skip rows that carry no stack_* data at all
                if not any('stack_'+k in r and r['stack_'+k].strip()
                        for k in StackResult._fields):
                    continue
                try:
                    results.append(StackResult(
                        **{k: r[k] for k in StackResult._by
                            if k in r and r[k].strip()},
                        **{k: r['stack_'+k] for k in StackResult._fields
                            if 'stack_'+k in r and r['stack_'+k].strip()}))
                except TypeError:
                    # tolerate malformed rows
                    pass

    # fold
    results = fold(StackResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else StackResult._sort)),
                reverse=reverse ^ (not k or k in StackResult._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else StackResult._by)
                + ['stack_'+k for k in (
                    fields if fields is not None else StackResult._fields)])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k) for k in (
                        by if by is not None else StackResult._by)}
                    | {'stack_'+k: getattr(r, k) for k in (
                        fields if fields is not None else StackResult._fields)})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    if not any('stack_'+k in r and r['stack_'+k].strip()
                            for k in StackResult._fields):
                        continue
                    try:
                        diff_results.append(StackResult(
                            **{k: r[k] for k in StackResult._by
                                if k in r and r[k].strip()},
                            **{k: r['stack_'+k] for k in StackResult._fields
                                if 'stack_'+k in r and r['stack_'+k].strip()}))
                    except TypeError:
                        # tolerate malformed rows, consistent with the --use
                        # path above and the sibling scripts (was `raise`,
                        # which crashed on a bad diff CSV row)
                        pass
        except FileNotFoundError:
            pass

        # fold
        diff_results = fold(StackResult, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(StackResult, results,
            diff_results if args.get('diff') else None,
            by=by if by is not None else ['function'],
            fields=fields,
            sort=sort,
            **args)

    # error on recursion
    if args.get('error_on_recursion') and any(
            m.isinf(float(r.limit)) for r in results):
        sys.exit(2)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: build the argparse CLI and hand the parsed
    # options to main() as keyword arguments
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find stack usage at the function level.",
        allow_abbrev=False)
    parser.add_argument(
        'ci_paths',
        nargs='*',
        help="Input *.ci files.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=StackResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=StackResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    # custom action so -s/-S can be given multiple times while remembering
    # both the field and the requested direction
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--tree',
        action='store_true',
        help="Only show the function call tree.")
    parser.add_argument(
        '-Z', '--depth',
        nargs='?',
        type=lambda x: int(x, 0),
        const=0,
        help="Depth of function calls to show. 0 shows all calls but may not "
            "terminate!")
    parser.add_argument(
        '-e', '--error-on-recursion',
        action='store_true',
        help="Error if any functions are recursive.")
    # drop unset options so main()'s defaults take effect
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
652
kernel/fs/littlefs/scripts/structs.py
Executable file
652
kernel/fs/littlefs/scripts/structs.py
Executable file
@ -0,0 +1,652 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find struct sizes.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/structs.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
    """An integer table field that also understands +-inf.

    Wraps an int (or math.inf/-math.inf), knows how to render itself for
    the size tables, diff/ratio itself against another field (either side
    may be None for added/removed entries), and merge with +/-/* when
    results are folded.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # (raw strings: '\s' in a plain literal is an invalid
                # escape sequence on newer Pythons)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    none = '%7s' % '-'
    def table(self):
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        # signed difference; either side may be None
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        # relative change, with infinities mapped to +-inf sort extremes
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# struct size results
|
||||
class StructResult(co.namedtuple('StructResult', ['file', 'struct', 'size'])):
    """A single struct-size measurement: (file, struct name, size in bytes)."""
    # metadata used by fold/table to group, display, and sort results
    _by = ['file', 'struct']
    _fields = ['size']
    _sort = ['size']
    _types = {'size': Int}

    __slots__ = ()
    def __new__(cls, file='', struct='', size=0):
        # normalize size into an Int field on construction
        return super().__new__(cls, file, struct, Int(size))

    def __add__(self, other):
        # merging two measurements of the same struct sums their sizes
        return StructResult(self.file, self.struct, self.size + other.size)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open path, treating '-' as stdin for read modes or stdout otherwise."""
    if path != '-':
        return open(path, mode, buffering)
    # duplicate the std fd so closing the returned file object doesn't
    # close the process's actual stdin/stdout
    std = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(std.fileno()), mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
        objdump_path=OBJDUMP_PATH,
        sources=None,
        everything=False,
        internal=False,
        **args):
    """Collect struct sizes from object files by parsing objdump DWARF output.

    For each object file this runs objdump twice: --dwarf=rawline to map
    DWARF file numbers to paths, then --dwarf=info to walk the DIEs and
    record each DW_TAG_structure_type's name, declaring file, and byte size.
    Results are filtered to sources/cwd and .h files (unless internal), and
    returned as a list of StructResult.
    """
    line_pattern = re.compile(
        '^\s+(?P<no>[0-9]+)'
        '(?:\s+(?P<dir>[0-9]+))?'
        '\s+.*'
        '\s+(?P<path>[^\s]+)$')
    info_pattern = re.compile(
        '^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
        '|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        '|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
        '|.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

    results = []
    for path in obj_paths:
        # find files, we want to filter by structs in .h files
        dirs = {}
        files = {}
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # note that files contain references to dirs, which we
            # dereference as soon as we see them as each file table follows a
            # dir table
            # NOTE(review): `m` here shadows the module-level math alias
            # within this function; math is not used below, so harmless
            m = line_pattern.match(line)
            if m:
                if not m.group('dir'):
                    # found a directory entry
                    dirs[int(m.group('no'))] = m.group('path')
                else:
                    # found a file entry
                    dir = int(m.group('dir'))
                    if dir in dirs:
                        files[int(m.group('no'))] = os.path.join(
                            dirs[dir],
                            m.group('path'))
                    else:
                        files[int(m.group('no'))] = m.group('path')
        proc.wait()
        if proc.returncode != 0:
            # objdump failed, forward its stderr and bail
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect structs as we parse dwarf info
        results_ = []
        is_struct = False
        s_name = None
        s_file = None
        s_size = None
        # note objdump-path may contain extra args
        cmd = objdump_path + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # state machine here to find structs
            m = info_pattern.match(line)
            if m:
                if m.group('tag'):
                    # a new DIE tag flushes any struct we were accumulating
                    if is_struct:
                        file = files.get(s_file, '?')
                        results_.append(StructResult(file, s_name, s_size))
                    is_struct = (m.group('tag') == 'DW_TAG_structure_type')
                elif m.group('name'):
                    s_name = m.group('name')
                elif m.group('file'):
                    s_file = int(m.group('file'))
                elif m.group('size'):
                    s_size = int(m.group('size'))
        # flush the trailing struct, if any
        if is_struct:
            file = files.get(s_file, '?')
            results_.append(StructResult(file, s_name, s_size))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        for r in results_:
            # ignore filtered sources
            if sources is not None:
                if not any(
                        os.path.abspath(r.file) == os.path.abspath(s)
                        for s in sources):
                    continue
            else:
                # default to only cwd
                if not everything and not os.path.commonpath([
                        os.getcwd(),
                        os.path.abspath(r.file)]) == os.getcwd():
                    continue

            # limit to .h files unless --internal
            if not internal and not r.file.endswith('.h'):
                continue

            # simplify path
            if os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(r.file)]) == os.getcwd():
                file = os.path.relpath(r.file)
            else:
                file = os.path.abspath(r.file)

            results.append(r._replace(file=file))

    return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Fold results into one entry per unique `by` tuple, merging with +.

    Results are optionally filtered first to those matching every
    (field, values) pair in defines. Prints an error and exits if a
    requested field isn't a known Result field.
    """
    if by is None:
        by = Result._by

    # validate requested fields up front
    for field in it.chain(by or [], (k for k, _ in defines or [])):
        if field not in Result._by and field not in Result._fields:
            print("error: could not find field %r?" % field)
            sys.exit(-1)

    # keep only results matching every define
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # bucket results by their `by` tuple, preserving first-seen order
    buckets = co.OrderedDict()
    for r in results:
        key = tuple(getattr(r, k) for k in by)
        buckets.setdefault(key, []).append(r)

    # merge each bucket with the result type's __add__
    return [sum(rs[1:], start=rs[0]) for rs in buckets.values()]
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    """Render results (optionally diffed against diff_results) as a text table.

    Rows are one per unique `by` tuple plus a TOTAL row; columns are the
    requested fields (old/new/diff columns when diffing). With summary only
    the TOTAL row is printed; with all unchanged rows are kept.
    """
    # the `all` parameter shadows the builtin, so stash the flag and restore
    # the builtin under its usual name
    # NOTE(review): __builtins__ is the builtins module when run as a script
    # but may be a dict when imported as a module — confirm this script is
    # only ever executed directly
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        for k, reverse in reversed(sort):
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))


    # build up our lines
    lines = []

    # header
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
        if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/diff column triplets when doing a full diff
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    # format one row; r/diff_r may be None for added/removed entries
    def table_entry(name, r, diff_r=None, ratios=[]):
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # skip unchanged entries unless -a/--all
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s  %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    """Find struct sizes and report them as a table and/or CSV.

    obj_paths -- input *.o files (or use args['use'] to read a previously
        written CSV instead of parsing)
    by        -- fields to group results by, defaults to StructResult._by
    fields    -- result fields to show, defaults to StructResult._fields
    defines   -- (field, values) filters applied while folding
    sort      -- list of (field, reverse) pairs applied in order
    args      -- remaining CLI options (use/output/diff/quiet/...)
    """
    # find sizes
    if not args.get('use', None):
        results = collect(obj_paths, **args)
    else:
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                # skip rows that carry no struct_* data at all
                if not any('struct_'+k in r and r['struct_'+k].strip()
                        for k in StructResult._fields):
                    continue
                try:
                    results.append(StructResult(
                        **{k: r[k] for k in StructResult._by
                            if k in r and r[k].strip()},
                        **{k: r['struct_'+k]
                            for k in StructResult._fields
                            if 'struct_'+k in r
                            and r['struct_'+k].strip()}))
                except TypeError:
                    # tolerate malformed rows
                    pass

    # fold
    results = fold(StructResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else StructResult._sort)),
                reverse=reverse ^ (not k or k in StructResult._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else StructResult._by)
                + ['struct_'+k for k in (
                    fields if fields is not None else StructResult._fields)])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k) for k in (
                        by if by is not None else StructResult._by)}
                    | {'struct_'+k: getattr(r, k) for k in (
                        fields if fields is not None else StructResult._fields)})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    if not any('struct_'+k in r and r['struct_'+k].strip()
                            for k in StructResult._fields):
                        continue
                    try:
                        diff_results.append(StructResult(
                            **{k: r[k] for k in StructResult._by
                                if k in r and r[k].strip()},
                            **{k: r['struct_'+k]
                                for k in StructResult._fields
                                if 'struct_'+k in r
                                and r['struct_'+k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            # missing diff file just means everything is "new"
            pass

        # fold
        diff_results = fold(StructResult, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(StructResult, results,
            diff_results if args.get('diff') else None,
            by=by if by is not None else ['struct'],
            fields=fields,
            sort=sort,
            **args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point: build the argparse CLI and hand the parsed
    # options to main() as keyword arguments
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find struct sizes.",
        allow_abbrev=False)
    parser.add_argument(
        'obj_paths',
        nargs='*',
        help="Input *.o files.")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=StructResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=StructResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    # custom action so -s/-S can be given multiple times while remembering
    # both the field and the requested direction
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--internal',
        action='store_true',
        help="Also show structs in .c files.")
    parser.add_argument(
        '--objdump-path',
        type=lambda x: x.split(),
        default=OBJDUMP_PATH,
        help="Path to the objdump executable, may include flags. "
            "Defaults to %r." % OBJDUMP_PATH)
    # drop unset options so main()'s defaults take effect
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
829
kernel/fs/littlefs/scripts/summary.py
Executable file
829
kernel/fs/littlefs/scripts/summary.py
Executable file
@ -0,0 +1,829 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to summarize the outputs of other scripts. Operates on CSV files.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/code.py lfs.o lfs_util.o -q -o lfs.code.csv
|
||||
# ./scripts/data.py lfs.o lfs_util.o -q -o lfs.data.csv
|
||||
# ./scripts/summary.py lfs.code.csv lfs.data.csv -q -o lfs.csv
|
||||
# ./scripts/summary.py -Y lfs.csv -f code=code_size,data=data_size
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import functools as ft
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
# supported merge operations
|
||||
#
|
||||
# this is a terrible way to express these
|
||||
#
|
||||
OPS = {
    # simple folds over the field type's own +/* operators
    'sum': lambda xs: sum(xs[1:], start=xs[0]),
    'prod': lambda xs: m.prod(xs[1:], start=xs[0]),
    'min': min,
    'max': max,
    # statistical merges always produce Float fields
    'mean': lambda xs: Float(sum(float(x) for x in xs) / len(xs)),
    # population (not sample) standard deviation
    'stddev': lambda xs: (
        lambda mean: Float(
            m.sqrt(sum((float(x) - mean)**2 for x in xs) / len(xs)))
        )(sum(float(x) for x in xs) / len(xs)),
    'gmean': lambda xs: Float(m.prod(float(x) for x in xs)**(1/len(xs))),
    # geometric standard deviation; ∞ when the gmean is zero
    'gstddev': lambda xs: (
        lambda gmean: Float(
            m.exp(m.sqrt(sum(m.log(float(x)/gmean)**2 for x in xs) / len(xs)))
            if gmean else m.inf)
        )(m.prod(float(x) for x in xs)**(1/len(xs))),
}
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
    """An integer table field that also understands +-inf.

    Wraps an int (or math.inf/-math.inf), knows how to render itself for
    the summary tables, diff/ratio itself against another field (either
    side may be None for added/removed entries), and merge with +/-/*
    when results are folded.
    """
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x
        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                # (raw strings: '\s' in a plain literal is an invalid
                # escape sequence on newer Pythons)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    none = '%7s' % '-'
    def table(self):
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        # signed difference; either side may be None
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        # relative change, with infinities mapped to +-inf sort extremes
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)
|
||||
|
||||
# float fields
|
||||
class Float(co.namedtuple('Float', 'x')):
    """A float table field that also understands +-inf.

    Renders with one decimal place; all table/diff/merge machinery is
    borrowed from Int.
    """
    __slots__ = ()
    def __new__(cls, x=0.0):
        if isinstance(x, Float):
            return x
        if isinstance(x, str):
            try:
                x = float(x)
            except ValueError:
                # also accept +-∞ and +-inf
                # (raw strings: '\s' in a plain literal is an invalid
                # escape sequence on newer Pythons)
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise
        assert isinstance(x, float), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return '%.1f' % self.x

    def __float__(self):
        return float(self.x)

    # borrow the table/diff/merge machinery from Int
    none = Int.none
    table = Int.table
    diff_none = Int.diff_none
    diff_table = Int.diff_table
    diff_diff = Int.diff_diff
    ratio = Int.ratio
    __add__ = Int.__add__
    __sub__ = Int.__sub__
    __mul__ = Int.__mul__
|
||||
|
||||
# fractional fields, a/b
|
||||
class Frac(co.namedtuple('Frac', 'a,b')):
    """A fractional table field a/b (e.g. passed/total), rendered with a percent.

    Accepts another Frac, an 'a/b' string, or separate a and b values; a
    bare single value yields a/a (100%). Both components are Int fields.
    """
    __slots__ = ()
    def __new__(cls, a=0, b=None):
        if isinstance(a, Frac) and b is None:
            return a
        if isinstance(a, str) and b is None:
            a, b = a.split('/', 1)
        if b is None:
            # single value means a/a, i.e. 100%
            b = a
        return super().__new__(cls, Int(a), Int(b))

    def __str__(self):
        return '%s/%s' % (self.a, self.b)

    def __float__(self):
        # comparisons/merges outside __lt__ only look at the numerator
        return float(self.a)

    none = '%11s %7s' % ('-', '-')
    def table(self):
        # a zero denominator counts as 100%
        t = self.a.x/self.b.x if self.b.x else 1.0
        return '%11s %7s' % (
            self,
            '∞%' if t == +m.inf
            else '-∞%' if t == -m.inf
            else '%.1f%%' % (100*t))

    diff_none = '%11s' % '-'
    def diff_table(self):
        return '%11s' % (self,)

    def diff_diff(self, other):
        # diff numerator and denominator independently
        new_a, new_b = self if self else (Int(0), Int(0))
        old_a, old_b = other if other else (Int(0), Int(0))
        return '%11s' % ('%s/%s' % (
            new_a.diff_diff(old_a).strip(),
            new_b.diff_diff(old_b).strip()))

    def ratio(self, other):
        # absolute change in the fraction's value
        new_a, new_b = self if self else (Int(0), Int(0))
        old_a, old_b = other if other else (Int(0), Int(0))
        new = new_a.x/new_b.x if new_b.x else 1.0
        old = old_a.x/old_b.x if old_b.x else 1.0
        return new - old

    def __add__(self, other):
        return self.__class__(self.a + other.a, self.b + other.b)

    def __sub__(self, other):
        return self.__class__(self.a - other.a, self.b - other.b)

    def __mul__(self, other):
        # NOTE(review): the denominator uses + while the numerator uses * —
        # looks suspicious next to __add__/__sub__; confirm this asymmetry
        # is intentional before changing it
        return self.__class__(self.a * other.a, self.b + other.b)

    def __lt__(self, other):
        # order by the fraction's value, breaking ties on the numerator
        self_t = self.a.x/self.b.x if self.b.x else 1.0
        other_t = other.a.x/other.b.x if other.b.x else 1.0
        return (self_t, self.a.x) < (other_t, other.a.x)

    def __gt__(self, other):
        return self.__class__.__lt__(other, self)

    def __le__(self, other):
        return not self.__gt__(other)

    def __ge__(self, other):
        return not self.__lt__(other)
|
||||
|
||||
# available field types, keyed by the CLI flag name (--int/--float/--frac);
# order matters: infer() tries these in order when guessing a field's type
TYPES = co.OrderedDict([
    ('int', Int),
    ('float', Float),
    ('frac', Frac)
])
|
||||
|
||||
|
||||
def infer(results, *,
        by=None,
        fields=None,
        types={},
        ops={},
        renames=[],
        **_):
    """Synthesize a Result namedtuple class from raw CSV rows.

    Guesses which columns are group-by keys ('by') and which are numeric
    fields, assigns the best-matching type from TYPES to each field, and
    returns a dynamically-created Result class whose instances lazily fold
    their field values via the per-field op (default OPS['sum']).

    results -- list of dicts as produced by csv.DictReader
    by      -- explicit group-by keys, or None to guess
    fields  -- explicit field keys, or None to guess
    types   -- explicit {field: type} overrides (read-only default is safe)
    ops     -- explicit {field: fold-op} overrides
    renames -- list of (new_k, old_k) rename pairs; old keys are excluded
               from the guessed 'by' keys
    """
    # if fields not specified, try to guess from data
    if fields is None:
        fields = co.OrderedDict()
        for r in results:
            for k, v in r.items():
                if (by is None or k not in by) and v.strip():
                    # keep only the types that can parse every non-empty value
                    types_ = []
                    for t in fields.get(k, TYPES.values()):
                        try:
                            t(v)
                            types_.append(t)
                        except ValueError:
                            pass
                    fields[k] = types_
        fields = list(k for k, v in fields.items() if v)

    # deduplicate fields
    fields = list(co.OrderedDict.fromkeys(fields).keys())

    # if by not specified, guess it's anything not in fields and not a
    # source of a rename
    if by is None:
        by = co.OrderedDict()
        for r in results:
            # also ignore None keys, these are introduced by csv.DictReader
            # when header + row mismatch
            by.update((k, True) for k in r.keys()
                if k is not None
                and k not in fields
                and not any(k == old_k for _, old_k in renames))
        by = list(by.keys())

    # deduplicate fields
    by = list(co.OrderedDict.fromkeys(by).keys())

    # find best type for all fields
    types_ = {}
    for k in fields:
        if k in types:
            types_[k] = types[k]
        else:
            # first type in TYPES that parses every non-empty value wins
            for t in TYPES.values():
                for r in results:
                    if k in r and r[k].strip():
                        try:
                            t(r[k])
                        except ValueError:
                            break
                else:
                    types_[k] = t
                    break
            else:
                print("error: no type matches field %r?" % k)
                sys.exit(-1)
    types = types_

    # does folding change the type?
    types_ = {}
    for k, t in types.items():
        types_[k] = ops.get(k, OPS['sum'])([t()]).__class__


    # create result class
    def __new__(cls, **r):
        # 'by' keys default to '', fields are stored as lists of typed
        # values so folding can defer the actual reduction (see __add__)
        return cls.__mro__[1].__new__(cls,
            **{k: r.get(k, '') for k in by},
            **{k: r[k] if k in r and isinstance(r[k], list)
                else [types[k](r[k])] if k in r
                else []
                for k in fields})

    def __add__(self, other):
        # concatenate the underlying value lists; object.__getattribute__
        # bypasses the lazy-folding __getattribute__ below
        return self.__class__(
            **{k: getattr(self, k) for k in by},
            **{k: object.__getattribute__(self, k)
                + object.__getattribute__(other, k)
                for k in fields})

    def __getattribute__(self, k):
        # lazily fold field value lists on access; empty lists read as None
        if k in fields:
            if object.__getattribute__(self, k):
                return ops.get(k, OPS['sum'])(object.__getattribute__(self, k))
            else:
                return None
        return object.__getattribute__(self, k)

    return type('Result', (co.namedtuple('Result', by + fields),), {
        '__slots__': (),
        '__new__': __new__,
        '__add__': __add__,
        '__getattribute__': __getattribute__,
        '_by': by,
        '_fields': fields,
        '_sort': fields,
        '_types': types_,
    })
|
||||
|
||||
|
||||
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    """Merge results sharing the same 'by' key into single entries.

    Result  -- result class providing _by/_fields and __add__
    results -- iterable of Result instances
    by      -- keys to group on; defaults to Result._by
    defines -- optional list of (key, allowed-values) filters applied
               before grouping

    Exits with an error if a requested key isn't a known field.
    """
    if by is None:
        by = Result._by

    # validate every requested group/filter key up front
    for key in it.chain(by or [], (k for k, _ in defines or [])):
        if key not in Result._by and key not in Result._fields:
            print("error: could not find field %r?" % key)
            sys.exit(-1)

    # keep only results matching every define
    if defines is not None:
        results = [r for r in results
            if all(getattr(r, k) in vs for k, vs in defines)]

    # bucket results by their 'by' key, preserving first-seen order
    buckets = co.OrderedDict()
    for r in results:
        key = tuple(getattr(r, k) for k in by)
        buckets.setdefault(key, []).append(r)

    # merge each bucket via the Result class's __add__
    return [sum(rs[1:], start=rs[0]) for rs in buckets.values()]
|
||||
|
||||
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    """Render results (optionally diffed against diff_results) as an
    aligned text table on stdout.

    by/fields default to Result._by/Result._fields; summary prints only the
    TOTAL row; all shows unchanged entries in diff mode; percent shows only
    percentage changes instead of old/new/delta columns.
    """
    # the 'all' parameter shadows the builtin; recover it first
    # NOTE(review): __builtins__ is a module when run as a script but a dict
    # when this file is imported — this line assumes script execution; verify
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        # biggest regressions first
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        for k, reverse in reversed(sort):
            names.sort(
                key=lambda n: tuple(
                    (getattr(table[n], k),)
                    if getattr(table.get(n), k, None) is not None else ()
                    for k in ([k] if k else [
                        k for k in Result._sort if k in fields])),
                reverse=reverse ^ (not k or k in Result._fields))


    # build up our lines
    lines = []

    # header: group-by names plus added/removed counts in diff mode
    header = []
    header.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
        if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            header.append(k)
    elif percent:
        for k in fields:
            header.append(k)
    else:
        # old/new/delta column triplets
        for k in fields:
            header.append('o'+k)
        for k in fields:
            header.append('n'+k)
        for k in fields:
            header.append('d'+k)
    header.append('')
    lines.append(header)

    def table_entry(name, r, diff_r=None, ratios=[]):
        # build one table row; ratios default is read-only, never mutated
        entry = []
        entry.append(name)
        if diff_results is None:
            for k in fields:
                entry.append(getattr(r, k).table()
                    if getattr(r, k, None) is not None
                    else types[k].none)
        elif percent:
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
        else:
            for k in fields:
                entry.append(getattr(diff_r, k).diff_table()
                    if getattr(diff_r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(getattr(r, k).diff_table()
                    if getattr(r, k, None) is not None
                    else types[k].diff_none)
            for k in fields:
                entry.append(types[k].diff_diff(
                    getattr(r, k, None),
                    getattr(diff_r, k, None)))
        # trailing ratio annotation, e.g. " (+1.2%, -0.5%)"
        if diff_results is None:
            entry.append('')
        elif percent:
            entry.append(' (%s)' % ', '.join(
                '+∞%' if t == +m.inf
                else '-∞%' if t == -m.inf
                else '%+.1f%%' % (100*t)
                for t in ratios))
        else:
            entry.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                if any(ratios) else '')
        return entry

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is None:
                diff_r = None
                ratios = None
            else:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                # skip unchanged entries unless -a/--all
                if not all_ and not any(ratios):
                    continue
            lines.append(table_entry(name, r, diff_r, ratios))

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is None:
        diff_r = None
        ratios = None
    else:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]
    lines.append(table_entry('TOTAL', r, diff_r, ratios))

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open *path*, where '-' means stdin for reads and stdout otherwise.

    The standard stream's descriptor is dup'd so closing the returned file
    object doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)
    stream = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stream.fileno()), mode, buffering)
|
||||
|
||||
def main(csv_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    """Summarize measurements in CSV files.

    Reads csv_paths, infers/parses field types, folds rows by the 'by'
    keys, optionally writes the folded results (-o) and diffs against a
    previous CSV (-d), then prints a table unless -q.

    by/fields arrive as (name, rename-sources) pairs from argparse;
    remaining options are picked out of **args by name.
    """
    # separate out renames
    renames = list(it.chain.from_iterable(
        ((k, v) for v in vs)
        for k, vs in it.chain(by or [], fields or [])))
    if by is not None:
        by = [k for k, _ in by]
    if fields is not None:
        fields = [k for k, _ in fields]

    # figure out types from the --int/--float/--frac flags
    types = {}
    for t in TYPES.keys():
        for k in args.get(t, []):
            if k in types:
                print("error: conflicting type for field %r?" % k)
                sys.exit(-1)
            types[k] = TYPES[t]
    # rename types?
    if renames:
        types_ = {}
        for new_k, old_k in renames:
            if old_k in types:
                types_[new_k] = types[old_k]
        types.update(types_)

    # figure out merge operations from the --sum/--prod/... flags
    ops = {}
    for o in OPS.keys():
        for k in args.get(o, []):
            if k in ops:
                print("error: conflicting op for field %r?" % k)
                sys.exit(-1)
            ops[k] = OPS[o]
    # rename ops?
    if renames:
        ops_ = {}
        for new_k, old_k in renames:
            if old_k in ops:
                ops_[new_k] = ops[old_k]
        ops.update(ops_)

    # find CSV files; missing files are silently skipped
    results = []
    for path in csv_paths:
        try:
            with openio(path) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    # rename fields?
                    if renames:
                        # make a copy so renames can overlap
                        r_ = {}
                        for new_k, old_k in renames:
                            if old_k in r:
                                r_[new_k] = r[old_k]
                        r.update(r_)

                    results.append(r)
        except FileNotFoundError:
            pass

    # homogenize raw dicts into a synthesized Result class
    Result = infer(results,
        by=by,
        fields=fields,
        types=types,
        ops=ops,
        renames=renames)
    results_ = []
    for r in results:
        # drop rows with no data in any field
        if not any(k in r and r[k].strip()
                for k in Result._fields):
            continue
        try:
            results_.append(Result(**{
                k: r[k] for k in Result._by + Result._fields
                if k in r and r[k].strip()}))
        except TypeError:
            pass
    results = results_

    # fold
    results = fold(Result, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(
                key=lambda r: tuple(
                    (getattr(r, k),) if getattr(r, k) is not None else ()
                    for k in ([k] if k else Result._sort)),
                reverse=reverse ^ (not k or k in Result._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f, Result._by + Result._fields)
            writer.writeheader()
            for r in results:
                # note we need to go through getattr to resolve lazy fields
                writer.writerow({
                    k: getattr(r, k) for k in Result._by + Result._fields})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    # rename fields?
                    if renames:
                        # make a copy so renames can overlap
                        r_ = {}
                        for new_k, old_k in renames:
                            if old_k in r:
                                r_[new_k] = r[old_k]
                        r.update(r_)

                    if not any(k in r and r[k].strip()
                            for k in Result._fields):
                        continue
                    try:
                        diff_results.append(Result(**{
                            k: r[k] for k in Result._by + Result._fields
                            if k in r and r[k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            pass

        # fold
        diff_results = fold(Result, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(Result, results,
            diff_results if args.get('diff') else None,
            by=by,
            fields=fields,
            sort=sort,
            **args)
|
||||
|
||||
|
||||
# command-line entry point: parse flags and forward only the options the
# user actually provided (None values are dropped so main()'s defaults apply)
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Summarize measurements in CSV files.",
        allow_abbrev=False)
    parser.add_argument(
        'csv_paths',
        nargs='*',
        help="Input *.csv files.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    # -b/-f accept "new_name=old_name" rename syntax, parsed into
    # (name, [sources]) pairs
    parser.add_argument(
        '-b', '--by',
        action='append',
        type=lambda x: (
            lambda k,v=None: (k, v.split(',') if v is not None else ())
            )(*x.split('=', 1)),
        help="Group by this field. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        type=lambda x: (
            lambda k,v=None: (k, v.split(',') if v is not None else ())
            )(*x.split('=', 1)),
        help="Show this field. Can rename fields with new_name=old_name.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value. May include "
            "comma-separated options.")
    # -s/-S share one sort list; the option spelling decides the direction
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        nargs='?',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    # per-field type overrides (see TYPES)
    parser.add_argument(
        '--int',
        action='append',
        help="Treat these fields as ints.")
    parser.add_argument(
        '--float',
        action='append',
        help="Treat these fields as floats.")
    parser.add_argument(
        '--frac',
        action='append',
        help="Treat these fields as fractions.")
    # per-field fold operations (see OPS)
    parser.add_argument(
        '--sum',
        action='append',
        help="Add these fields (the default).")
    parser.add_argument(
        '--prod',
        action='append',
        help="Multiply these fields.")
    parser.add_argument(
        '--min',
        action='append',
        help="Take the minimum of these fields.")
    parser.add_argument(
        '--max',
        action='append',
        help="Take the maximum of these fields.")
    parser.add_argument(
        '--mean',
        action='append',
        help="Average these fields.")
    parser.add_argument(
        '--stddev',
        action='append',
        help="Find the standard deviation of these fields.")
    parser.add_argument(
        '--gmean',
        action='append',
        help="Find the geometric mean of these fields.")
    parser.add_argument(
        '--gstddev',
        action='append',
        help="Find the geometric standard deviation of these fields.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
177
kernel/fs/littlefs/scripts/tailpipe.py
Executable file
177
kernel/fs/littlefs/scripts/tailpipe.py
Executable file
@ -0,0 +1,177 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Efficiently displays the last n lines of a file/pipe.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/tailpipe.py trace -n5
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import io
|
||||
import os
|
||||
import select
|
||||
import shutil
|
||||
import sys
|
||||
import threading as th
|
||||
import time
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open *path*, where '-' means stdin for reads and stdout otherwise.

    The standard stream's descriptor is dup'd so closing the returned file
    object doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)
    stream = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stream.fileno()), mode, buffering)
|
||||
|
||||
class LinesIO:
    """A file-like sink that keeps only the last maxlen lines and can
    redraw them in place on a terminal using ANSI escape sequences.

    maxlen=0 means "size to the terminal height", re-checked on each draw.
    """
    def __init__(self, maxlen=None):
        self.maxlen = maxlen
        self.lines = co.deque(maxlen=maxlen)
        # partial (unterminated) last line accumulates here
        self.tail = io.StringIO()

        # trigger automatic sizing
        if maxlen == 0:
            self.resize(0)

    def write(self, s):
        """Append text, splitting complete lines into the ring buffer."""
        # note using split here ensures the trailing string has no newline
        lines = s.split('\n')

        # a pending partial line is completed by the first new chunk
        if len(lines) > 1 and self.tail.getvalue():
            self.tail.write(lines[0])
            lines[0] = self.tail.getvalue()
            self.tail = io.StringIO()

        self.lines.extend(lines[:-1])

        # keep any new partial line for later
        if lines[-1]:
            self.tail.write(lines[-1])

    def resize(self, maxlen):
        """Change the ring size; 0 means track the terminal height."""
        self.maxlen = maxlen
        if maxlen == 0:
            maxlen = shutil.get_terminal_size((80, 5))[1]
        if maxlen != self.lines.maxlen:
            self.lines = co.deque(self.lines, maxlen=maxlen)

    # class-level count of terminal rows currently used by draw(); shared
    # so multiple instances don't fight over the same canvas
    canvas_lines = 1
    def draw(self):
        """Redraw the buffered lines in place on the terminal."""
        # did terminal size change?
        if self.maxlen == 0:
            self.resize(0)

        # first thing first, give ourself a canvas
        while LinesIO.canvas_lines < len(self.lines):
            sys.stdout.write('\n')
            LinesIO.canvas_lines += 1

        # clear the bottom of the canvas if we shrink
        shrink = LinesIO.canvas_lines - len(self.lines)
        if shrink > 0:
            for i in range(shrink):
                sys.stdout.write('\r')
                if shrink-1-i > 0:
                    sys.stdout.write('\x1b[%dA' % (shrink-1-i))
                sys.stdout.write('\x1b[K')
                if shrink-1-i > 0:
                    sys.stdout.write('\x1b[%dB' % (shrink-1-i))
            sys.stdout.write('\x1b[%dA' % shrink)
            LinesIO.canvas_lines = len(self.lines)

        for i, line in enumerate(self.lines):
            # move cursor, clear line, disable/reenable line wrapping
            sys.stdout.write('\r')
            if len(self.lines)-1-i > 0:
                sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
            sys.stdout.write('\x1b[K')
            sys.stdout.write('\x1b[?7l')
            sys.stdout.write(line)
            sys.stdout.write('\x1b[?7h')
            if len(self.lines)-1-i > 0:
                sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
        sys.stdout.flush()
|
||||
|
||||
|
||||
def main(path='-', *, lines=5, cat=False, sleep=None, keep_open=False):
    """Tail *path* (or stdin), continuously redrawing the last lines.

    cat pipes straight through to stdout instead of redrawing; keep_open
    reopens the path on EOF (useful for pipes with multiple writers).
    """
    if cat:
        ring = sys.stdout
    else:
        ring = LinesIO(lines)

    # if sleep print in background thread to avoid getting stuck in a read call
    event = th.Event()
    lock = th.Lock()
    if not cat:
        done = False
        def background():
            # redraw whenever new data arrives, rate-limited by sleep
            while not done:
                event.wait()
                event.clear()
                with lock:
                    ring.draw()
                time.sleep(sleep or 0.01)
        th.Thread(target=background, daemon=True).start()

    try:
        while True:
            with openio(path) as f:
                for line in f:
                    with lock:
                        ring.write(line)
                    event.set()

            if not keep_open:
                break
            # don't just flood open calls
            time.sleep(sleep or 0.1)
    except FileNotFoundError as e:
        print("error: file not found %r" % path)
        sys.exit(-1)
    except KeyboardInterrupt:
        pass

    if not cat:
        done = True
        lock.acquire() # avoids https://bugs.python.org/issue42717
        sys.stdout.write('\n')
|
||||
|
||||
|
||||
# command-line entry point; None-valued options are dropped so main()'s
# defaults apply
if __name__ == "__main__":
    import sys
    import argparse
    parser = argparse.ArgumentParser(
        description="Efficiently displays the last n lines of a file/pipe.",
        allow_abbrev=False)
    parser.add_argument(
        'path',
        nargs='?',
        help="Path to read from.")
    parser.add_argument(
        '-n', '--lines',
        nargs='?',
        type=lambda x: int(x, 0),
        const=0,
        help="Show this many lines of history. 0 uses the terminal height. "
            "Defaults to 5.")
    parser.add_argument(
        '-z', '--cat',
        action='store_true',
        help="Pipe directly to stdout.")
    parser.add_argument(
        '-s', '--sleep',
        type=float,
        help="Seconds to sleep between reads. Defaults to 0.01.")
    parser.add_argument(
        '-k', '--keep-open',
        action='store_true',
        help="Reopen the pipe on EOF, useful when multiple "
            "processes are writing.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
73
kernel/fs/littlefs/scripts/teepipe.py
Executable file
73
kernel/fs/littlefs/scripts/teepipe.py
Executable file
@ -0,0 +1,73 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# tee, but for pipes
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/tee.py in_pipe out_pipe1 out_pipe2
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
|
||||
import io
|
||||
import time
|
||||
import sys
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open *path*, where '-' means stdin for reads and stdout otherwise.

    The standard stream's descriptor is dup'd so closing the returned file
    object doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)
    stream = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stream.fileno()), mode, buffering)
|
||||
|
||||
def main(in_path, out_paths, *, keep_open=False):
    """Copy bytes from in_path to every path in out_paths (tee for pipes).

    keep_open keeps polling after EOF instead of exiting; writers whose
    pipe breaks are silently skipped.
    """
    # unbuffered binary writers so data is forwarded immediately
    out_pipes = [openio(p, 'wb', 0) for p in out_paths]
    try:
        with openio(in_path, 'rb', 0) as f:
            while True:
                buf = f.read(io.DEFAULT_BUFFER_SIZE)
                if not buf:
                    if not keep_open:
                        break
                    # don't just flood reads
                    time.sleep(0.1)
                    continue

                for p in out_pipes:
                    try:
                        p.write(buf)
                    except BrokenPipeError:
                        pass
    except FileNotFoundError as e:
        print("error: file not found %r" % in_path)
        sys.exit(-1)
    except KeyboardInterrupt:
        pass
|
||||
|
||||
|
||||
# command-line entry point; None-valued options are dropped so main()'s
# defaults apply
if __name__ == "__main__":
    import sys
    import argparse
    parser = argparse.ArgumentParser(
        description="tee, but for pipes.",
        allow_abbrev=False)
    parser.add_argument(
        'in_path',
        help="Path to read from.")
    parser.add_argument(
        'out_paths',
        nargs='+',
        help="Path to write to.")
    parser.add_argument(
        '-k', '--keep-open',
        action='store_true',
        help="Reopen the pipe on EOF, useful when multiple "
            "processes are writing.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))
|
1487
kernel/fs/littlefs/scripts/test.py
Executable file
1487
kernel/fs/littlefs/scripts/test.py
Executable file
File diff suppressed because it is too large
Load Diff
1002
kernel/fs/littlefs/scripts/tracebd.py
Executable file
1002
kernel/fs/littlefs/scripts/tracebd.py
Executable file
File diff suppressed because it is too large
Load Diff
265
kernel/fs/littlefs/scripts/watch.py
Executable file
265
kernel/fs/littlefs/scripts/watch.py
Executable file
@ -0,0 +1,265 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Traditional watch command, but with higher resolution updates and a bit
|
||||
# different options/output format
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/watch.py -s0.1 date
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import errno
|
||||
import fcntl
|
||||
import io
|
||||
import os
|
||||
import pty
|
||||
import re
|
||||
import shutil
|
||||
import struct
|
||||
import subprocess as sp
|
||||
import sys
|
||||
import termios
|
||||
import time
|
||||
|
||||
try:
|
||||
import inotify_simple
|
||||
except ModuleNotFoundError:
|
||||
inotify_simple = None
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
    """Open *path*, where '-' means stdin for reads and stdout otherwise.

    The standard stream's descriptor is dup'd so closing the returned file
    object doesn't close the real stdin/stdout.
    """
    if path != '-':
        return open(path, mode, buffering)
    stream = sys.stdin if mode == 'r' else sys.stdout
    return os.fdopen(os.dup(stream.fileno()), mode, buffering)
|
||||
|
||||
def inotifywait(paths):
    """Block until any of *paths* changes, using inotify.

    Directories are watched recursively (directory entries and their
    files); requires the third-party inotify_simple module.
    """
    # wait for interesting events
    inotify = inotify_simple.INotify()
    flags = (inotify_simple.flags.ATTRIB
        | inotify_simple.flags.CREATE
        | inotify_simple.flags.DELETE
        | inotify_simple.flags.DELETE_SELF
        | inotify_simple.flags.MODIFY
        | inotify_simple.flags.MOVED_FROM
        | inotify_simple.flags.MOVED_TO
        | inotify_simple.flags.MOVE_SELF)

    # recurse into directories
    for path in paths:
        if os.path.isdir(path):
            for dir, _, files in os.walk(path):
                inotify.add_watch(dir, flags)
                for f in files:
                    inotify.add_watch(os.path.join(dir, f), flags)
        else:
            inotify.add_watch(path, flags)

    # wait for event; the watches are dropped when inotify is collected
    inotify.read()
|
||||
|
||||
class LinesIO:
    """A file-like sink that keeps only the last maxlen lines and can
    redraw them in place on a terminal using ANSI escape sequences.

    maxlen=0 means "size to the terminal height", re-checked on each draw.
    """
    def __init__(self, maxlen=None):
        self.maxlen = maxlen
        self.lines = co.deque(maxlen=maxlen)
        # partial (unterminated) last line accumulates here
        self.tail = io.StringIO()

        # trigger automatic sizing
        if maxlen == 0:
            self.resize(0)

    def write(self, s):
        """Append text, splitting complete lines into the ring buffer."""
        # note using split here ensures the trailing string has no newline
        lines = s.split('\n')

        # a pending partial line is completed by the first new chunk
        if len(lines) > 1 and self.tail.getvalue():
            self.tail.write(lines[0])
            lines[0] = self.tail.getvalue()
            self.tail = io.StringIO()

        self.lines.extend(lines[:-1])

        # keep any new partial line for later
        if lines[-1]:
            self.tail.write(lines[-1])

    def resize(self, maxlen):
        """Change the ring size; 0 means track the terminal height."""
        self.maxlen = maxlen
        if maxlen == 0:
            maxlen = shutil.get_terminal_size((80, 5))[1]
        if maxlen != self.lines.maxlen:
            self.lines = co.deque(self.lines, maxlen=maxlen)

    # class-level count of terminal rows currently used by draw(); shared
    # so multiple instances don't fight over the same canvas
    canvas_lines = 1
    def draw(self):
        """Redraw the buffered lines in place on the terminal."""
        # did terminal size change?
        if self.maxlen == 0:
            self.resize(0)

        # first thing first, give ourself a canvas
        while LinesIO.canvas_lines < len(self.lines):
            sys.stdout.write('\n')
            LinesIO.canvas_lines += 1

        # clear the bottom of the canvas if we shrink
        shrink = LinesIO.canvas_lines - len(self.lines)
        if shrink > 0:
            for i in range(shrink):
                sys.stdout.write('\r')
                if shrink-1-i > 0:
                    sys.stdout.write('\x1b[%dA' % (shrink-1-i))
                sys.stdout.write('\x1b[K')
                if shrink-1-i > 0:
                    sys.stdout.write('\x1b[%dB' % (shrink-1-i))
            sys.stdout.write('\x1b[%dA' % shrink)
            LinesIO.canvas_lines = len(self.lines)

        for i, line in enumerate(self.lines):
            # move cursor, clear line, disable/reenable line wrapping
            sys.stdout.write('\r')
            if len(self.lines)-1-i > 0:
                sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
            sys.stdout.write('\x1b[K')
            sys.stdout.write('\x1b[?7l')
            sys.stdout.write(line)
            sys.stdout.write('\x1b[?7h')
            if len(self.lines)-1-i > 0:
                sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
        sys.stdout.flush()
|
||||
|
||||
|
||||
def main(command, *,
        lines=0,
        cat=False,
        sleep=None,
        keep_open=False,
        keep_open_paths=None,
        exit_on_error=False):
    """Repeatedly run *command* under a pty, redrawing its output in place.

    lines           -- history to show; 0 sizes to the terminal
    cat             -- pipe output straight to stdout instead of redrawing
    sleep           -- seconds between runs (default 0.1)
    keep_open       -- use inotify (if available) to wait for file changes
                       instead of a fixed sleep
    keep_open_paths -- explicit paths to watch; defaults to paths guessed
                       from the command's arguments
    exit_on_error   -- stop looping and propagate the command's returncode
                       on first failure

    Never returns; exits the process via sys.exit.
    """
    returncode = 0
    try:
        while True:
            # reset ring each run
            if cat:
                ring = sys.stdout
            else:
                ring = LinesIO(lines)

            try:
                # run the command under a pseudoterminal so it behaves as if
                # attached to a terminal (colors, line buffering, etc.)
                mpty, spty = pty.openpty()

                # forward terminal size
                w, h = shutil.get_terminal_size((80, 5))
                if lines:
                    h = lines
                fcntl.ioctl(spty, termios.TIOCSWINSZ,
                    struct.pack('HHHH', h, w, 0, 0))

                proc = sp.Popen(command,
                    stdout=spty,
                    stderr=spty,
                    close_fds=False)
                os.close(spty)
                mpty = os.fdopen(mpty, 'r', 1)

                while True:
                    try:
                        line = mpty.readline()
                    except OSError as e:
                        # EIO here just means the child closed its side
                        if e.errno != errno.EIO:
                            raise
                        break
                    if not line:
                        break

                    ring.write(line)
                    if not cat:
                        ring.draw()

                mpty.close()
                proc.wait()
                if exit_on_error and proc.returncode != 0:
                    returncode = proc.returncode
                    break
            except OSError as e:
                # ETXTBSY can happen when the watched executable is being
                # rewritten; just retry on the next iteration
                if e.errno != errno.ETXTBSY:
                    raise
                pass

            # try to inotifywait
            if keep_open and inotify_simple is not None:
                if keep_open_paths:
                    # fix: was set(keep_paths), an undefined name that made
                    # -K/--keep-open-path raise NameError
                    paths = set(keep_open_paths)
                else:
                    # guess inotify paths from command
                    paths = set()
                    for p in command:
                        # also consider option arguments like -Ifoo / --inc=foo
                        for p in {
                                p,
                                re.sub('^-.', '', p),
                                re.sub('^--[^=]+=', '', p)}:
                            if p and os.path.exists(p):
                                paths.add(p)
                ptime = time.time()
                inotifywait(paths)
                # sleep for a minimum amount of time, this helps issues around
                # rapidly updating files
                time.sleep(max(0, (sleep or 0.1) - (time.time()-ptime)))
            else:
                time.sleep(sleep or 0.1)
    except KeyboardInterrupt:
        pass

    if not cat:
        sys.stdout.write('\n')
    sys.exit(returncode)
|
||||
|
||||
|
||||
# command-line entry point; None-valued options are dropped so main()'s
# defaults apply (note parse_args, not parse_intermixed_args, so flags
# after the command are passed through to it)
if __name__ == "__main__":
    import sys
    import argparse
    parser = argparse.ArgumentParser(
        description="Traditional watch command, but with higher resolution "
            "updates and a bit different options/output format.",
        allow_abbrev=False)
    parser.add_argument(
        'command',
        nargs=argparse.REMAINDER,
        help="Command to run.")
    parser.add_argument(
        '-n', '--lines',
        nargs='?',
        type=lambda x: int(x, 0),
        const=0,
        help="Show this many lines of history. 0 uses the terminal height. "
            "Defaults to 0.")
    parser.add_argument(
        '-z', '--cat',
        action='store_true',
        help="Pipe directly to stdout.")
    parser.add_argument(
        '-s', '--sleep',
        type=float,
        help="Seconds to sleep between runs. Defaults to 0.1.")
    parser.add_argument(
        '-k', '--keep-open',
        action='store_true',
        help="Try to use inotify to wait for changes.")
    parser.add_argument(
        '-K', '--keep-open-path',
        dest='keep_open_paths',
        action='append',
        help="Use this path for inotify. Defaults to guessing.")
    parser.add_argument(
        '-e', '--exit-on-error',
        action='store_true',
        help="Exit on error.")
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_args()).items()
        if v is not None}))
|
Reference in New Issue
Block a user