A slightly more versatile reindent.py
# reindent.py -- Tim Peters, and H.Moreira
# pylint: disable=missing-function-docstring, trailing-whitespace, line-too-long, invalid-name, unused-argument
# Re-named to reindent.py and enhanced by Henrique Moreira, 01 Dec 2019.
# Released to the public domain, by Anshul Kanakia, 22 May 2019 as reindentv2.py
# Original code reindent.py released to the public domain, by Tim Peters, 03 October 2000.
# This version is modified to allow custom indent space values (not just 4 spaces).
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose) Verbose. Print informative msgs; else no output.
(--newline) Newline. Specify the newline character to use (CRLF, LF).
Default is the same as the original file.
(--spaces) Spaces. Specify the number of spaces to use for indent
(default = @@/1/).
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use x-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
The backup file is a copy of the one that is being reindented. The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer. You can always use the --nobackup option to prevent this.
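
Examples (illustrative):
    reindent.py -r -v src/             # reindent every .py file under src/, recursively
    reindent.py --spaces=2 tool.py     # use 2-space indentation instead of the default
    reindent.py < messy.py > clean.py  # filter mode: read stdin, write stdout
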
(c) 2020 Henrique Moreira
at: https://gist.github.com/serrasqueiro/31e19db1dba146e512e4ea39c2c76279.git
"""
__version__ = "2.10 02"
import tokenize
import os
import shutil
import sys
import getopt
moduleName = "reindent"
verbose = False
recurse = False
dryrun = False
makebackup = True
# A specified newline to be used in the output (set by --newline option)
spec_newline = None
correct_spaces = 4
def main():
    code = main_script(sys.stdin, sys.stdout, sys.stderr)
    assert isinstance(code, int)
    sys.exit(code)

def main_script(inStream, outStream, errFile):
    global verbose, recurse, dryrun, makebackup, spec_newline
    code = 0
    debug = 0
    outFile = outStream
    spaces = correct_spaces
    cOpts = {"spaces": spaces,
             "debug": debug,
             "details": 1,
             "@issues": dict(),
             }
    strHelp = __doc__.replace("@@/1/", str(spaces))
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "drnvqh",
            ["version",
             "dryrun", "recurse", "nobackup", "verbose",
             "quiet",
             "newline=", "spaces=",
             "debug=",
             "help"])
    except getopt.error as msg:
        usage(msg)
    for op, a in opts:
        if op in ('-d', '--dryrun'):
            dryrun = True
        elif op in ('-r', '--recurse'):
            recurse = True
        elif op in ('-n', '--nobackup'):
            makebackup = False
        elif op in ('-v', '--verbose'):
            verbose = True
        elif op in ('-q', '--quiet'):
            cOpts["details"] = 0
        elif op in ('--newline',):
            if not a.upper() in ('CRLF', 'LF'):
                usage(strHelp)
            spec_newline = dict(CRLF='\r\n', LF='\n')[a.upper()]
        elif op in ('--spaces',):
            try:
                spaces = int(a)
                assert spaces in (2, 4, 6)
            except (ValueError, AssertionError):
                usage(strHelp)
        elif op in ('--version',):
            print(moduleName, __version__)
            usage(None, None)
        elif op in ('--debug',):
            try:
                debug = int(a)
                assert debug in (0, 1, 6, 9)
            except (ValueError, AssertionError):
                usage(strHelp)
        elif op in ('-h', '--help'):
            usage(strHelp)
    # Consolidate args
    cOpts["debug"] = debug
    cOpts["spaces"] = spaces  # propagate --spaces to the per-file options as well
    if not args:
        r = Reindenter(inStream, spaces, debug)
        r.run()
        r.write(outFile)
    else:
        for arg in args:
            changed = check(arg, cOpts)
            if changed is None:
                errFile.write("\nCowardly exiting after error: {}\n".format(arg))
                return 4
            if verbose:
                if changed:
                    print("Changed." if not dryrun else "Would have changed.", file=errFile)
                else:
                    print("Unchanged.", file=errFile)
            if changed:
                code = 1
    return code

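# Note (summary, not in the original gist): main_script() returns 0 when nothing
# changed, 1 when at least one file changed (or would change under --dryrun),
# and 4 when a path could not be processed (check() returned None).
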
def usage(msg=None, toFile=sys.stdout):
    if msg is None:
        msg = __doc__
    elif toFile is not None:
        print(msg, file=toFile)
    sys.exit(0)

def errprint(*args):
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")

def check(file, cOpts):
    outFile = sys.stdout
    errFile = sys.stderr
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("listing directory", file)
        names = os.listdir(file)
        changed_any = False
        for name in names:
            fullname = os.path.join(file, name)
            if ((recurse and os.path.isdir(fullname)
                 and not os.path.islink(fullname)
                 and not os.path.split(fullname)[1].startswith("."))
                    or name.lower().endswith(".py")):
                # Report whether anything below this directory changed;
                # None is reserved for errors on individual files.
                changed_any = bool(check(fullname, cOpts)) or changed_any
        return changed_any
    spaces = cOpts["spaces"]
    details = cOpts["details"]
    debug = cOpts["debug"]
    isQuiet = details <= 0
    if isQuiet:
        outFile = None
    if verbose:
        print("checking", file, "...", end=' ', file=errFile)
    with open(file, 'rb') as f:
        try:
            encoding, _ = tokenize.detect_encoding(f.readline)
        except SyntaxError as se:
            errprint("%s: SyntaxError: %s" % (file, str(se)))
            return None
    try:
        with open(file, encoding=encoding) as f:
            r = Reindenter(f, spaces, debug)
    except IOError as msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return None
    newline = spec_newline if spec_newline else r.newlines
    if isinstance(newline, tuple):
        errprint("%s: mixed newlines detected; cannot continue without --newline" % file)
        return None
    didChange = r.run()
    if r.flaws:
        for idx, tup in enumerate(r.flaws, 1):
            line_nr, line = tup
            print(f"Flaw# {idx}/{len(r.flaws)}, line {line_nr}: {line}", end="\n\n")
        errFile.write(f"Flaws len={len(r.flaws)}\n")
        assert False, f"At least one flaw: {len(r.flaws)}"
    if didChange:
        if dryrun:
            if outFile is not None:
                r.write(outFile)
        else:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print("backed up", file, "to", bak, file=errFile)
            with open(file, "w", encoding=encoding, newline=newline) as f:
                r.write(f)
            if verbose:
                print("wrote new", file, file=errFile)
            return True
    return didChange

def _rstrip(line, JUNK='\n \t'):
    """Return line stripped of trailing spaces, tabs, newlines.

    Note that line.rstrip() instead also strips sundry control characters,
    but at least one known Emacs user expects to keep junk like that, not
    mentioning Barry by name or anything <wink>.
    """
    i = len(line)
    while i > 0 and line[i - 1] in JUNK:
        i -= 1
    return line[:i]

class IndentEcho:
    """ abstract CLASS IndentEcho -- helper, or statistics """
    debug = 0
    flaws = []

    def init_indent_echo(self, debug):
        self.debug = debug
        self.flaws = list()

    def add_flaw(self, s, lineNumber=-1):
        self.flaws.append((lineNumber, s))

class Reindenter(IndentEcho):
    """ Re-indent text """
    find_stmt = -1
    level = 0
    raw, after = [], []

    def __init__(self, strOrStream, spaces, debug=0):
        self.init_indent_echo(debug)
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0      # current indent level
        # Raw file lines
        if isinstance(strOrStream, str):
            assert strOrStream.find("\r") == -1  # No carriage-return in strings, please!
            self.raw = strOrStream.split("\n")
        else:
            inStream = strOrStream
            self.raw = inStream.readlines()
        # File lines, rstripped & tab-expanded. Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line
        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line. indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []
        # Save the newlines found in the file so they can be used to
        # create output without mutating the newlines.
        try:
            self.newlines = inStream.newlines
        except (NameError, AttributeError):
            # String input has no stream, hence no .newlines attribute.
            self.newlines = "\n"
        # Set the number of indent spaces
        self.spaces = spaces

    def run(self):
        tokens = tokenize.generate_tokens(self.getline)
        for _token in tokens:
            self.tokeneater(*_token)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            aLine = lines[thisstmt]
            if self.debug > 0:
                print("#Debug: i={}, thisstmt={}, thislevel={}".format(i, thisstmt, thislevel))
                print("{}{}".format(aLine if aLine else "", "." if aLine else "<null>\n"))
            nextstmt = stats[i + 1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * self.spaces
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line. If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in range(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * self.spaces
                                break
                    if want < 0:
                        # Maybe it's a hanging
                        # comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in range(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + (getlspace(after[jline - 1]) -
                                               getlspace(lines[jline]))
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            lineBlock = lines[thisstmt:nextstmt]
            if diff == 0 or have == 0:
                after.extend(lineBlock)
            else:
                sRaw = "".join(lineBlock)
                tripleQuotes = split_step(sRaw.replace("\n", "\\n"), '"""')
                nTriples = tripleQuotes.count('"""')
                if nTriples > 0:
                    assert nTriples >= 2
                    if nTriples > 2:
                        self.add_flaw(tripleQuotes, thisstmt)
                    if self.debug > 0:
                        print("Debug: tripleQuotes ({0}) >>> {1} <<<\n".format(nTriples, tripleQuotes))
                tripState = 0
                for line in lineBlock:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            if tripState != 0:
                                headBlanks = ""
                            else:
                                headBlanks = " " * diff
                            after.append(headBlanks + line)
                        if nTriples > 0:
                            n = line.count('"""')
                            if n >= 2:
                                self.add_flaw(tripleQuotes + ["(STRANGE triple quotes)"], thisstmt)
                            elif n == 1:
                                tripState = int(tripState == 0)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after

    def write(self, f):
        f.writelines(self.after)

    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line

    # Line-eater for tokenize.
    def tokeneater(self, a_type, token, slinecol, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):
        if a_type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1
        elif a_type == INDENT:
            self.find_stmt = 1
            self.level += 1
        elif a_type == DEDENT:
            self.find_stmt = 1
            self.level -= 1
        elif a_type == COMMENT:
            if self.find_stmt:
                self.stats.append((slinecol[0], -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone
        elif a_type == NL:
            pass
        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:  # not endmarker
                self.stats.append((slinecol[0], self.level))
        return True

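# Illustrative note (not part of the original gist): for the two-line input
# "if x:\n    pass\n", tokeneater() leaves stats == [(1, 0), (2, 1)] -- one
# (line number, indent level) pair per statement, with -1 used as the level
# for comment-only lines.
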
class AnyClassTest:
    """ abstract CLASS AnyClassTest """
    tests = []

    def init_AnyClassTest(self, tests=None):
        # Build a fresh list so the input sequence is never mutated.
        doTests = list()
        if isinstance(tests, (tuple, list)):
            for a in tests:
                s = a.replace("@@@", '"""')
                doTests.append(s)
        else:
            assert False, "tests must be a tuple or list"
        self.tests = doTests
        self.results = []

class CLASS_Test_Reindenter(AnyClassTest):
    """ Re-indenter test class """
    def __init__(self, sample):
        self.blanks = 4
        self.init_AnyClassTest((sample,))
        self.run()

    def dump(self):
        for a in self.tests:
            print("{}".format(a))

    def run(self):
        for a in self.tests:
            x = Reindenter(a, self.blanks, 1)
            x.run()
            self.results.append(x.after)
        return True

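# Minimal programmatic sketch (illustrative, not part of the original gist): the
# Reindenter class can also be driven directly on a string, much like
# CLASS_Test_Reindenter does above. The helper name below is made up and nothing
# in this module calls it.
def _example_reindent_string(src="def f():\n  return 1\n", spaces=4):
    """ Return 'src' re-indented with 'spaces'-wide indents (illustration only). """
    r = Reindenter(src, spaces)
    changed = r.run()  # True when the text would be modified
    return "".join(r.after) if changed else src
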
def getlspace(line):
    """ Count number of leading blanks. """
    i, n = 0, len(line)
    while i < n and line[i] == " ":
        i += 1
    return i
#
def split_step(s, splitBy, stopWhen=-1):
    """ splits, but keeps 'splitBy' """
    res = []
    assert isinstance(splitBy, str)
    if splitBy == "":
        return res
    if isinstance(s, str):
        while True:
            pos = s.find(splitBy)
            if pos < 0:
                break
            rem = s[pos + len(splitBy):]
            left = s[:pos]
            res.append(left)
            res.append(splitBy)
            s = rem
    else:
        assert False
    return res
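# Behaviour sketch (illustrative, not part of the original gist):
#   split_step('a"""b"""c', '"""') -> ['a', '"""', 'b', '"""']
# The remainder after the last delimiter is not returned; Reindenter.run()
# only uses the result to count '"""' markers in a line block.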
#
# Main script
#
if __name__ == '__main__':
    # forked from https://gist.github.com/akanakia/6d287b38145757ee76c90dc943fb0341
    main()

# LIXO (Portuguese: junk) -- leftover scratch code, unreachable when run as a
# script because main() above calls sys.exit().
if True:
    docInit = """Abc
Def @line3
Ghi @line4
"""
    pass
print("Bye!")