mirror of
https://github.com/apidoorman/doorman.git
synced 2026-02-09 02:29:42 -06:00
code cleanup
This commit is contained in:
6
.gitignore
vendored
6
.gitignore
vendored
@@ -55,3 +55,9 @@ env/
|
||||
**/venv/
|
||||
**/env/
|
||||
backend-services/platform-logs/doorman.log
|
||||
|
||||
# Local helper scripts (never commit)
|
||||
scripts/cleanup_inline_comments.py
|
||||
scripts/style_unify_python.py
|
||||
scripts/add_route_docblocks.py
|
||||
scripts/dedupe_docblocks.py
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import re
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
|
||||
# Matches decorator lines that reference a *_router object, e.g. "@api_router.get(...)".
ROUTE_DECORATOR_RE = re.compile(r"^\s*@\s*\w*_router\.")
# Matches the opening line of an async handler, e.g. "async def get_users(".
DEF_RE = re.compile(r'^\s*async\s+def\s+\w+\s*\(')
|
||||
|
||||
|
||||
def extract_description(block: str):
    """Pull the value of a description='...' (or "...") keyword out of *block*.

    Returns the stripped description text, or None when no quoted
    description assignment is present.
    """
    match = re.search(r"description\s*=\s*(['\"])(.*?)\1", block, re.S)
    return match.group(2).strip() if match else None
|
||||
|
||||
|
||||
def has_docblock_before(lines: list[str], start_index: int) -> bool:
    """Return True when the nearest non-blank line above *start_index*
    begins a triple-quoted block (i.e. a docblock already exists)."""
    for idx in range(start_index - 1, -1, -1):
        stripped = lines[idx].strip()
        if stripped == "":
            continue
        return stripped.startswith(('"""', "'''"))
    return False
|
||||
|
||||
|
||||
def build_docblock(desc: str) -> list[str]:
    """Render the skeleton docblock for *desc* as a list of newline-terminated
    lines: summary, blank line, Request/Response placeholders, closing quotes,
    and a trailing blank line."""
    skeleton = ['"""', desc, '', 'Request:', '{}', 'Response:', '{}', '"""', '']
    return [part + '\n' for part in skeleton]
|
||||
|
||||
|
||||
def process_file(path: Path) -> bool:
    """Insert a skeleton docblock above every route decorator in *path* that
    lacks one; relocate docblocks found between decorators and the def.

    Returns True (and rewrites the file) when anything changed.
    """
    text = path.read_text(encoding='utf-8')
    lines = text.splitlines(True)
    i = 0
    changed = False
    n = len(lines)
    while i < n:
        if ROUTE_DECORATOR_RE.match(lines[i]):
            # Decorator block starts here; scan forward to the async def line.
            start = i
            j = i + 1
            while j < n and not DEF_RE.match(lines[j]):
                j += 1
            # Now j points at the async def (or EOF).
            if j < n:
                # If a misplaced docblock sits between decorators and the def,
                # relocate it above the decorators.
                k = start
                moved = False  # NOTE(review): set but never read afterwards
                while k < j:
                    if lines[k].lstrip().startswith(('"""', "'''")):
                        # Find the end of the triple-quoted block.
                        quote = '"""' if lines[k].lstrip().startswith('"""') else "'''"
                        m = k + 1
                        while m < j and quote not in lines[m]:
                            m += 1
                        if m < j:
                            block = lines[k:m+1]
                            del lines[k:m+1]
                            lines[start:start] = block + ['\n']
                            # NOTE(review): after deleting `shift` lines and
                            # inserting `shift + 1` before the def, the def
                            # actually lands at j + 1, yet j is decremented by
                            # `shift` here — looks like an off-by-`shift+1`;
                            # confirm before relying on the relocation path.
                            shift = (m+1 - k)
                            j -= shift
                            n = len(lines)
                            moved = True
                            break
                        else:
                            # Unterminated block before the def: give up moving.
                            break
                    k += 1

                if not has_docblock_before(lines, start):
                    # Derive the summary from description=... in the decorator
                    # text, falling back to a generic label.
                    desc = extract_description(''.join(lines[start:j])) or 'Endpoint'
                    doc = build_docblock(desc)
                    lines[start:start] = doc
                    n = len(lines)
                    i = j + len(doc) + 1
                    changed = True
                    continue
                else:
                    i = j + 1
                    continue
            else:
                # Decorator with no following def before EOF: stop scanning.
                break
        i += 1
    if changed:
        path.write_text(''.join(lines), encoding='utf-8')
    return changed
|
||||
|
||||
|
||||
def main(paths):
    """Collect route modules under *paths* and add docblocks to each,
    printing the path of every file that was modified."""
    excluded = {'.git', 'venv', '.venv', '__pycache__', 'generated'}
    targets = []
    for raw in paths:
        candidate = Path(raw)
        if candidate.is_dir():
            targets.extend(
                f for f in candidate.rglob('*.py')
                if 'routes' in f.parts and not any(part in excluded for part in f.parts)
            )
        elif candidate.suffix == '.py':
            targets.append(candidate)
    for target in sorted(set(targets)):
        if process_file(target):
            print(f"docblock: {target}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: default to the backend route tree when no paths given.
    args = sys.argv[1:] or ['backend-services/routes']
    main(args)
|
||||
@@ -1,167 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import re
|
||||
import tokenize
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
|
||||
def strip_inline_comments_python(code: str) -> str:
    """Remove trailing (inline) comments from Python source.

    Full-line comments (starting at column 0) are preserved.  Runs of blank
    lines are collapsed to one and trailing whitespace is trimmed.  When
    tokenization fails, falls back to a naive '#'-based scan.
    """
    kept = []
    try:
        reader = BytesIO(code.encode("utf-8")).readline
        for token in tokenize.tokenize(reader):
            if token.type == tokenize.COMMENT:
                # Keep only comments that own their whole line.
                if token.start[1] == 0:
                    kept.append(token)
            else:
                kept.append(token)
        stripped = tokenize.untokenize(kept).decode("utf-8")
    except Exception:
        # Tokenizer choked (e.g. syntactically invalid file): crude fallback
        # that trims text after '#' on lines that also carry code.
        fallback = []
        for raw in code.splitlines(True):
            if '#' in raw:
                pos = raw.find('#')
                if pos > 0 and raw[:pos].strip():
                    raw = raw[:pos].rstrip() + ("\n" if raw.endswith("\n") else "")
            fallback.append(raw)
        stripped = ''.join(fallback)

    # Collapse consecutive blank lines and strip trailing whitespace.
    collapsed = []
    run = 0
    for ln in stripped.splitlines():
        if ln.strip():
            run = 0
            collapsed.append(ln.rstrip())
        else:
            run += 1
            if run <= 1:
                collapsed.append("")
    return "\n".join(collapsed) + ("\n" if stripped.endswith("\n") else "")
|
||||
|
||||
def strip_inline_comments_ts(code: str) -> str:
    """Remove trailing // and inline /* */ comments from TS/TSX source,
    then collapse blank-line runs and drop bare '{}' placeholder lines.

    Comments that occupy a whole line are preserved.  String literals
    (single, double, backtick) are respected so '//' inside them is kept.
    """
    def remove_trailing_line_comment(s: str) -> str:
        # Character scanner: track quote context and backslash escapes so
        # comment markers inside string literals are ignored.
        i = 0
        n = len(s)
        in_single = False
        in_double = False
        in_backtick = False
        escape = False
        while i < n:
            ch = s[i]
            if escape:
                escape = False
                i += 1
                continue
            if ch == "\\":
                escape = True
                i += 1
                continue
            if not (in_single or in_double or in_backtick):
                if ch == '"':
                    in_double = True
                elif ch == "'":
                    in_single = True
                elif ch == "`":
                    in_backtick = True
                elif ch == "/" and i + 1 < n and s[i + 1] == "/":
                    # Line comment: keep it when it owns the line, otherwise
                    # cut it off.
                    prefix = s[:i]
                    if prefix.strip() == "":
                        return s
                    else:
                        return prefix.rstrip()
                elif ch == "/" and i + 1 < n and s[i + 1] == "*":
                    # Inline block comment: splice it out when code precedes
                    # it; re-insert a space if the suffix starts with an
                    # operator so tokens do not fuse.
                    end = s.find("*/", i + 2)
                    if end != -1:
                        prefix = s[:i]
                        suffix = s[end + 2:]
                        if prefix.strip():
                            s = (prefix.rstrip() + (" " if suffix and suffix.strip().startswith(('+','-','*','/')) else "") + suffix.lstrip())
                            # Rescan from the splice point on the shortened line.
                            n = len(s)
                            i = len(prefix)
                            continue
                    else:
                        # Unterminated /* on this line: leave it untouched.
                        return s
            else:
                # Inside a string literal: only watch for the closing quote.
                if in_double and ch == '"':
                    in_double = False
                elif in_single and ch == "'":
                    in_single = False
                elif in_backtick and ch == "`":
                    in_backtick = False
            i += 1
        return s.rstrip()

    processed = []
    for line in code.splitlines(True):
        # NOTE(review): endswith("\n") is also true for "\r\n", so the
        # "\r\n" branch below is unreachable and CRLF lines keep a stray
        # "\r" in `core` (later removed by rstrip) — confirm intent.
        newline = "\n" if line.endswith("\n") else ("\r\n" if line.endswith("\r\n") else "")
        core = line[:-len(newline)] if newline else line
        processed.append(remove_trailing_line_comment(core) + newline)

    # Collapse blank-line runs; drop stray JSX '{}' placeholders left behind
    # by comment removal.
    lines = ''.join(processed).splitlines()
    collapsed = []
    blank_run = 0
    for ln in lines:
        stripped = ln.strip()
        # Remove stray JSX placeholders left by comment removal
        if stripped == "{}":
            continue
        if stripped == "":
            blank_run += 1
            if blank_run <= 1:
                collapsed.append("")
        else:
            blank_run = 0
            collapsed.append(ln.rstrip())
    return "\n".join(collapsed) + ("\n" if processed and processed[-1].endswith("\n") else "")
|
||||
|
||||
def main(paths):
    """Strip inline comments from every .py/.ts/.tsx file under *paths*,
    printing each file that was rewritten."""
    py_exts = {".py"}
    ts_exts = {".ts", ".tsx"}
    skip_dirs = {"node_modules", "venv", ".venv", ".git", "dist", "build"}
    wanted = py_exts | ts_exts
    found = []
    for raw in paths:
        root = Path(raw)
        if root.is_dir():
            for entry in root.rglob("*"):
                if not entry.is_file():
                    continue
                # Skip anything inside an ignored directory.
                if set(entry.parts) & skip_dirs:
                    continue
                if entry.suffix in wanted:
                    found.append(entry)
        elif root.suffix in wanted:
            found.append(root)

    for entry in sorted(set(found)):
        try:
            before = entry.read_text(encoding="utf-8")
        except Exception:
            continue
        if entry.suffix in py_exts:
            after = strip_inline_comments_python(before)
        else:
            after = strip_inline_comments_ts(before)
        if after != before:
            entry.write_text(after, encoding="utf-8")
            print(f"cleaned: {entry}")
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: default to the current directory when no paths given.
    args = sys.argv[1:] or ["."]
    main(args)
|
||||
@@ -1,82 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def dedupe_file(path: Path) -> bool:
    """Collapse immediately-consecutive triple-quoted docblocks in *path*
    (separated only by blank lines) down to the first one, and normalize to
    a single blank line after each docblock.

    Returns True (and rewrites the file) when anything changed.
    """
    text = path.read_text(encoding='utf-8')
    lines = text.splitlines(True)
    out = []
    i = 0
    n = len(lines)
    changed = False
    while i < n:
        line = lines[i]
        if line.lstrip().startswith(('"""', "'''")):
            # Capture this docblock.
            # NOTE(review): the closing quote is searched only from i+1, so a
            # one-line docstring ("""x""") is not treated as closed on its own
            # line — confirm inputs never contain one-liners.
            quote = '"""' if line.lstrip().startswith('"""') else "'''"
            j = i + 1
            while j < n and quote not in lines[j]:
                j += 1
            if j >= n:
                # Unclosed; copy through.
                out.append(line)
                i += 1
                continue
            block = lines[i:j+1]
            # Check for an immediate duplicate docblock following
            # (ignoring blank lines).
            k = j + 1
            blanks = []  # NOTE(review): collected but never used
            while k < n and lines[k].strip() == '':
                blanks.append(lines[k])
                k += 1
            if k < n and lines[k].lstrip().startswith(('"""', "'''")):
                # Drop the second consecutive docblock; keep only one and a
                # single blank line.
                out.extend(block)
                out.append('\n')
                # Skip the duplicate block by advancing past its end.
                q2 = '"""' if lines[k].lstrip().startswith('"""') else "'''"
                m = k + 1
                while m < n and q2 not in lines[m]:
                    m += 1
                if m < n:
                    i = m + 1
                    changed = True
                    continue
                # NOTE(review): when the duplicate is unclosed (m >= n) we
                # fall through and emit `block` a second time below — likely
                # unintended; confirm.
            # Normal path: emit block and continue.
            out.extend(block)
            i = j + 1
            # Preserve a single blank line after a docblock.
            if i < n and lines[i].strip() == '':
                out.append('\n')
                while i < n and lines[i].strip() == '':
                    i += 1
            continue
        out.append(line)
        i += 1
    new = ''.join(out)
    if new != text:
        path.write_text(new, encoding='utf-8')
        return True
    return False
|
||||
|
||||
|
||||
def main(paths):
    """Dedupe docblocks in every route module found under *paths*,
    printing each file that was rewritten."""
    targets = []
    for raw in paths:
        candidate = Path(raw)
        if candidate.is_dir():
            targets.extend(f for f in candidate.rglob('*.py') if 'routes' in f.parts)
        elif candidate.suffix == '.py':
            targets.append(candidate)
    for target in sorted(set(targets)):
        if dedupe_file(target):
            print(f'deduped: {target}')
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: default to the backend route tree when no paths given.
    args = sys.argv[1:] or ['backend-services/routes']
    main(args)
|
||||
|
||||
@@ -1,187 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
import re
|
||||
import tokenize
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
|
||||
# Top-level package names treated as project-internal when grouping imports.
INTERNAL_PREFIXES = {
    "models",
    "services",
    "routes",
    "utils",
    "doorman",
    "generated",
}
|
||||
|
||||
|
||||
def normalize_quotes(code: str) -> str:
    """Rewrite double-quoted string literals in *code* as single-quoted ones.

    Triple-quoted strings (likely docstrings) are left alone, as are
    literals whose body contains a single quote (converting them would
    require escaping).  String prefixes (r/b/u/f in any case) are preserved.
    On any tokenizer error the source is returned unchanged.
    """
    out = []
    try:
        tokens = list(tokenize.tokenize(BytesIO(code.encode("utf-8")).readline))
        for tok in tokens:
            if tok.type == tokenize.STRING:
                s = tok.string
                # Skip triple-quoted strings (likely docstrings).
                if "'''" in s or '"""' in s:
                    out.append(tok)
                    continue
                # Split into prefix (e.g. r, u, f, fr), quote, body, quote.
                m = re.match(r"^([rubfRUBF]*)([\'\"])(.*)([\'\"])$", s, re.S)
                if not m:
                    out.append(tok)
                    continue
                prefix, q1, body, q2 = m.groups()
                if q1 != q2:
                    out.append(tok)
                    continue
                # Only convert double to single when safe (no single quotes
                # inside the body).  The replacement has the same length, so
                # token positions stay valid for untokenize.
                if q1 == '"' and "'" not in body:
                    new_s = f"{prefix}'{body}'"
                    out.append(tokenize.TokenInfo(tok.type, new_s, tok.start, tok.end, tok.line))
                else:
                    out.append(tok)
            else:
                out.append(tok)
        return tokenize.untokenize(out).decode("utf-8")
    except Exception:
        # Tokenization failed (syntax error etc.): leave the source untouched.
        return code
|
||||
|
||||
|
||||
def reorder_imports_with_headers(lines: list[str]) -> list[str]:
    """Regroup the leading import block of a module into '# External imports'
    and '# Internal imports' sections, de-duplicated, preserving everything
    before the imports (shebang, encoding cookie, module docstring).

    Returns the rewritten list of lines (unchanged when no imports found).
    """
    i = 0
    n = len(lines)

    # Skip shebang/encoding
    while i < n and (lines[i].lstrip().startswith('#!') or lines[i].lstrip().startswith('# -*-')):
        i += 1

    # Preserve initial blank lines
    start = i  # NOTE(review): assigned but never used
    while i < n and lines[i].strip() == "":
        i += 1

    # Skip initial module docstring (triple quotes)
    doc_start = i  # NOTE(review): assigned but never used
    if i < n and lines[i].lstrip().startswith(('"""', "'''")):
        quote = '"""' if lines[i].lstrip().startswith('"""') else "'''"
        i += 1
        while i < n and quote not in lines[i]:
            i += 1
        if i < n:
            i += 1
        # Skip any following blank lines
        while i < n and lines[i].strip() == "":
            i += 1

    import_start = i
    # Collect contiguous import block (imports, comments, blanks).
    imports = []
    while i < n and (
        lines[i].lstrip().startswith('import ')
        or lines[i].lstrip().startswith('from ')
        or lines[i].strip().startswith('#')
        or lines[i].strip() == ''
    ):
        imports.append(lines[i])
        i += 1

    if not imports:
        return lines

    # Split imports into actual import lines, ignore existing section comments.
    # NOTE(review): any other comments captured in this region are dropped
    # from the output — confirm that is acceptable.
    import_lines = [ln for ln in imports if ln.lstrip().startswith(('import ', 'from '))]

    def classify(line: str) -> str:
        # 'internal' for relative imports or roots in INTERNAL_PREFIXES,
        # 'external' otherwise ('other' for lines that are neither form).
        s = line.strip()
        if s.startswith('from '):
            mod = s[5:].split('import', 1)[0].strip()
            if mod.startswith('.'):
                return 'internal'
        elif s.startswith('import '):
            mod = s[7:].split(' as ', 1)[0].split(',', 1)[0].strip()
        else:
            return 'other'
        root = mod.split('.')[0]
        return 'internal' if root in INTERNAL_PREFIXES else 'external'

    external = []
    internal = []
    for ln in import_lines:
        (internal if classify(ln) == 'internal' else external).append(ln.rstrip())

    # De-duplicate while preserving ordering
    def dedupe(seq):
        seen = set()
        out = []
        for item in seq:
            if item not in seen:
                seen.add(item)
                out.append(item)
        return out

    external = dedupe(external)
    internal = dedupe(internal)

    # Build the replacement import block with section headers.
    new_block = []
    if external:
        new_block.append('# External imports')
        new_block.extend(external)
    if internal:
        if new_block:
            new_block.append('')
        new_block.append('# Internal imports')
        new_block.extend(internal)
    new_block.append('')

    # Reconstruct file
    result = []
    result.extend(lines[:import_start])
    result.extend([ln + ('' if ln.endswith('\n') else '\n') for ln in new_block])
    # Skip original imports and any trailing blank lines in that region
    j = i
    while j < n and lines[j].strip() == "":
        j += 1
    result.extend(lines[j:])
    return result
|
||||
|
||||
|
||||
def process_file(path: Path):
    """Apply quote normalization then import regrouping to one file.

    Returns True when the file was rewritten, False otherwise (including
    when it could not be read).
    """
    try:
        original = path.read_text(encoding='utf-8')
    except Exception:
        return False
    normalized = normalize_quotes(original)
    styled = ''.join(reorder_imports_with_headers(normalized.splitlines(True)))
    if styled == original:
        return False
    path.write_text(styled, encoding='utf-8')
    print(f"styled: {path}")
    return True
|
||||
|
||||
|
||||
def main(paths):
    """Gather .py files under *paths* (skipping vendored/generated dirs)
    and run the style pass over each."""
    excluded = {'.git', 'venv', '.venv', '__pycache__', 'generated'}
    queue = []
    for raw in paths:
        candidate = Path(raw)
        if candidate.is_dir():
            queue.extend(
                f for f in candidate.rglob('*.py')
                if not any(part in excluded for part in f.parts)
            )
        elif candidate.suffix == '.py':
            queue.append(candidate)
    for target in sorted(set(queue)):
        process_file(target)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: default to the backend source tree when no paths given.
    args = sys.argv[1:] or ['backend-services']
    main(args)
|
||||
Reference in New Issue
Block a user