|
| 1 | +import os |
| 2 | +import ast |
| 3 | +import doctest |
| 4 | +import coverage |
| 5 | +import importlib |
| 6 | +import sys |
| 7 | +import io |
| 8 | +import subprocess |
| 9 | + |
def has_doctest(file_path):
    """Return True if any docstring in *file_path* contains a ">>>" example.

    Parses the file's AST and inspects module, class, and (async) function
    docstrings. Returns False on read/parse errors after printing a warning.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            tree = ast.parse(f.read(), filename=file_path)
        # AsyncFunctionDef is included: doctest.testmod collects docstrings of
        # async functions too, so they must count as doctest-bearing here.
        doc_owners = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef, ast.Module)
        for node in ast.walk(tree):
            if isinstance(node, doc_owners):
                docstring = ast.get_docstring(node)
                if docstring and ">>>" in docstring:
                    return True
    except Exception as e:
        print(f"⚠️ Error parsing {file_path}: {e}")
    return False
| 23 | + |
def run_doctest_with_coverage(file_path, project_root, filename):
    """Import *file_path* as a module and run its doctests under coverage.

    Returns a summary dict with keys "type", "file", "passed", "failed",
    and "coverage" (percent covered; 0 on import or analysis failure).
    """
    print(f"\n📄 Running doctests in: {file_path}")

    # Make the file importable by its dotted path relative to the project root.
    if project_root not in sys.path:
        sys.path.insert(0, project_root)

    rel_path = os.path.relpath(file_path, project_root)
    # Slice off the ".py" extension. The previous rstrip(".py") stripped any
    # trailing '.', 'p', or 'y' characters from the module name (e.g.
    # "heap.py" -> "hea"), which broke the import.
    if rel_path.endswith(".py"):
        rel_path = rel_path[:-3]
    module_name = rel_path.replace(os.sep, ".")

    cov = coverage.Coverage(source=[os.path.dirname(file_path)], include=[file_path])
    cov.start()

    try:
        module = importlib.import_module(module_name)
        result = doctest.testmod(module)
    except Exception as e:
        print(f"❌ Import failed for {file_path}: {e}")
        cov.stop()
        cov.save()
        return {
            "type": "doctest",
            "file": filename,
            "passed": 0,
            "failed": 1,
            "coverage": 0
        }

    cov.stop()
    cov.save()

    try:
        # Render the textual report into a throwaway buffer; cov.report()
        # returns the coverage percentage, which is all we keep.
        buffer = io.StringIO()
        percent = cov.report(file=buffer, show_missing=False)
        buffer.close()
    except Exception as e:
        print(f"⚠️ Coverage analysis failed for {file_path}: {e}")
        percent = 0

    print(f"✅ Passed: {result.attempted - result.failed}, ❌ Failed: {result.failed}")
    print(f"📊 Coverage: {percent:.1f}%")
    return {
        "type": "doctest",
        "file": filename,
        "passed": result.attempted - result.failed,
        "failed": result.failed,
        "coverage": percent
    }
| 75 | + |
def run_pytest_with_coverage(pytest_dir, filename):
    """Run pytest over *pytest_dir* under coverage and return a summary dict.

    Pass/fail counts are not tracked per test here (reported as "-"); only
    the total coverage percentage is extracted from the coverage report.
    """
    print(f"\n🧪 Running pytest in: {pytest_dir}")

    pytest_command = [
        "coverage", "run", "--parallel-mode", "-m", "pytest", pytest_dir,
        "--tb=short", "--disable-warnings",
    ]
    subprocess.run(pytest_command)

    # Merge the parallel-mode data files into a single .coverage file.
    subprocess.run(["coverage", "combine"])

    percent = 0.0
    try:
        completed = subprocess.run(
            ["coverage", "report"],
            capture_output=True,
            text=True
        )
        report_text = completed.stdout
        print(report_text)

        # The TOTAL percentage is the last token of the report's last line.
        report_lines = report_text.strip().splitlines()
        if report_lines and "%" in report_lines[-1]:
            percent = float(report_lines[-1].split()[-1].replace("%", ""))
    except Exception as e:
        print(f"⚠️ Coverage analysis failed for {pytest_dir}: {e}")
        percent = 0.0

    print(f"📊 Coverage: {percent:.1f}%")
    return {
        "type": "pytest",
        "file": filename,
        "passed": "-",
        "failed": "-",
        "coverage": percent
    }
| 110 | + |
def run_all_tests(doctest_dirs, pytest_dirs, project_root):
    """Walk *doctest_dirs* running every doctest-bearing .py file, run pytest
    over each directory in *pytest_dirs*, and print a combined summary table.
    """
    summary = []

    # Doctest pass: every .py file under the doctest directories that
    # actually contains ">>>" examples.
    for parent_directory in doctest_dirs:
        for dirpath, _, filenames in os.walk(parent_directory):
            for filename in filenames:
                if filename.endswith(".py"):
                    file_path = os.path.join(dirpath, filename)
                    if has_doctest(file_path):
                        result = run_doctest_with_coverage(file_path, project_root, filename)
                        summary.append(result)

    # Pytest pass: label each row with its directory name. The previous code
    # reused the leftover `filename` loop variable from the walk above
    # (NameError when no .py files were seen, wrong label otherwise).
    for test_dir in pytest_dirs:
        dir_label = os.path.basename(os.path.normpath(test_dir))
        result = run_pytest_with_coverage(test_dir, dir_label)
        summary.append(result)

    print("\n\n\n############# COMBINED SUMMARY REPORT #############\n")
    print_summary(summary)
| 129 | + |
| 130 | + |
def print_summary(summary):
    """Print an ASCII table of test results plus a TOTAL row.

    *summary* is a list of dicts with keys "type", "file", "passed" and
    "failed" (ints, or "-" for pytest rows), and "coverage" (a number,
    interpreted as a percentage). Prints "No tests found." when empty.
    """
    if not summary:
        print("No tests found.")
        return

    # Size each column to its widest cell, header included.
    type_width = max(len("Type"), max(len(i["type"]) for i in summary))
    file_width = max(len("File"), max(len(i["file"]) for i in summary))
    passed_width = max(len("Passed"), max(len(str(i["passed"])) for i in summary))
    failed_width = max(len("Failed"), max(len(str(i["failed"])) for i in summary))
    coverage_width = max(len("Coverage"), max(len(f"{i['coverage']:.1f}%") for i in summary))

    # Border dashes = 1 leading space + padded cell width in each column.
    border = (
        f"+{'-' * (type_width+4)}+{'-' * (file_width+2)}+{'-' * (passed_width+6)}+"
        f"{'-' * (failed_width+6)}+{'-' * (coverage_width+6)}+"
    )

    header = (
        f"| {'Type':<{type_width+3}}"
        f"| {'File':<{file_width+1}}"
        f"| {'Passed':<{passed_width+5}}"
        f"| {'Failed':<{failed_width+5}}"
        f"| {'Coverage':<{coverage_width+5}}|"
    )

    print(border)
    print(header)
    print(border)

    total_passed, total_failed, coverage_values = 0, 0, []

    for item in summary:
        label = "pytest" if item["type"] == "pytest" else "doctest"
        coverage_str = f"{item['coverage']:.1f}%"
        row = (
            f"| {label:<{type_width+3}}"
            f"| {item['file']:<{file_width+1}}"
            f"| {item['passed']:<{passed_width+5}}"
            f"| {item['failed']:<{failed_width+5}}"
            f"| {coverage_str:<{coverage_width+5}}|"
        )
        print(row)

        # "-" placeholders (pytest rows) are excluded from the totals.
        if isinstance(item["passed"], int):
            total_passed += item["passed"]
        if isinstance(item["failed"], int):
            total_failed += item["failed"]
        if isinstance(item["coverage"], (int, float)):
            coverage_values.append(item["coverage"])

    print(border)

    avg_coverage = sum(coverage_values) / len(coverage_values) if coverage_values else 0
    avg_str = f"{avg_coverage:.1f}%"

    # Pad the coverage cell to coverage_width+5 like every other row; the
    # previous code padded to coverage_width+4, misaligning the TOTAL row's
    # closing '|' by one column.
    total_row = (
        f"| {'TOTAL':<{type_width+3}}"
        f"| {'':<{file_width+1}}"
        f"| {total_passed:<{passed_width+5}}"
        f"| {total_failed:<{failed_width+5}}"
        f"| {avg_str:<{coverage_width+5}}|"
    )
    print(total_row)
    print(border)
| 202 | + |
| 203 | + |
if __name__ == "__main__":
    # Repository root: two directory levels up from this script.
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
    print(project_root)

    doctest_dirs = [
        os.path.join(project_root, "data_structures"),
    ]

    pytest_dirs = [
        os.path.join(project_root, "data_structures"),
    ]

    run_all_tests(doctest_dirs, pytest_dirs, project_root)
0 commit comments