Skip to content

Commit 988d99b

Browse files
Add v3 Locations performance test and update script support
- Add TestDojoImporterPerformanceSmallLocations with V3_FEATURE_LOCATIONS.
- Update update_performance_test_counts.py to run both the v2 and v3 test classes.
- Add --no-keepdb and EXTRA_ARGS support to run-unittest.sh for test flexibility.
1 parent 6f36fc9 commit 988d99b

3 files changed

Lines changed: 277 additions & 93 deletions

File tree

run-unittest.sh

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
#!/usr/bin/env bash
22
unset TEST_CASE
33
unset FAIL_FAST
4+
unset KEEP_DB
5+
EXTRA_ARGS=()
46

57
bash ./docker/docker-compose-check.sh
68
if [[ $? -eq 1 ]]; then exit 1; fi
@@ -12,6 +14,7 @@ usage() {
1214
echo "Options:"
1315
echo " --test-case -t {YOUR_FULLY_QUALIFIED_TEST_CASE}"
1416
echo " --fail-fast -f - stop on first test failure"
17+
echo " --no-keepdb - recreate the test database (don't reuse existing)"
1518
echo " --help -h - prints this dialogue."
1619
echo
1720
echo "You must specify a test case (arg)!"
@@ -41,6 +44,10 @@ while [[ $# -gt 0 ]]; do
4144
FAIL_FAST="--failfast"
4245
shift # past argument
4346
;;
47+
--no-keepdb)
48+
KEEP_DB=""
49+
shift # past argument
50+
;;
4451
-h|--help)
4552
usage
4653
exit 0
@@ -66,4 +73,5 @@ echo "Running docker compose unit tests with test case $TEST_CASE ..."
6673
# Compose V2 integrates compose functions into the Docker platform, continuing to support
6774
# most of the previous docker-compose features and flags. You can run Compose V2 by
6875
# replacing the hyphen (-) with a space, using docker compose, instead of docker-compose.
69-
docker compose exec uwsgi bash -c "python manage.py test $TEST_CASE -v2 --keepdb $FAIL_FAST"
76+
KEEP_DB="${KEEP_DB:---keepdb}"
77+
docker compose exec uwsgi bash -c "python manage.py test $TEST_CASE -v2 $KEEP_DB $FAIL_FAST ${EXTRA_ARGS[*]}"

scripts/update_performance_test_counts.py

Lines changed: 124 additions & 91 deletions
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,18 @@
1111
1212
How to run:
1313
14-
# Default: Update the test file (uses TestDojoImporterPerformanceSmall by default)
14+
# Default: Update both v2 and v3 test classes
1515
python3 scripts/update_performance_test_counts.py
1616
17-
# Or specify a different test class:
18-
python3 scripts/update_performance_test_counts.py --test-class TestDojoImporterPerformanceSmall
19-
2017
# Step 1: Run tests and generate report only (without updating)
2118
python3 scripts/update_performance_test_counts.py --report-only
2219
2320
# Step 2: Verify all tests pass
2421
python3 scripts/update_performance_test_counts.py --verify
2522
26-
The script defaults to TestDojoImporterPerformanceSmall if --test-class is not provided.
27-
The script defaults to --update behavior if no action flag is provided.
23+
The script always runs and updates both TestDojoImporterPerformanceSmall (v2) and
24+
TestDojoImporterPerformanceSmallLocations (v3). The script defaults to --update
25+
behavior if no action flag is provided.
2826
"""
2927

3028
import argparse
@@ -36,6 +34,12 @@
3634
# Path to the test file
3735
TEST_FILE = Path(__file__).parent.parent / "unittests" / "test_importers_performance.py"
3836

37+
# Both v2 and v3 performance test classes - script always updates/verifies both
38+
TEST_CLASSES = (
39+
"TestDojoImporterPerformanceSmall",
40+
"TestDojoImporterPerformanceSmallLocations",
41+
)
42+
3943

4044
class TestCount:
4145

@@ -64,9 +68,9 @@ def extract_test_methods(test_class: str) -> list[str]:
6468

6569
content = TEST_FILE.read_text()
6670

67-
# Find the test class definition
71+
# Find the test class definition (use (?<!-) to avoid matching "test-class" in docstrings)
6872
class_pattern = re.compile(
69-
rf"class {re.escape(test_class)}.*?(?=class |\Z)",
73+
rf"class {re.escape(test_class)}.*?(?=(?<!-)class \w|\Z)",
7074
re.DOTALL,
7175
)
7276
class_match = class_pattern.search(content)
@@ -362,12 +366,17 @@ def generate_report(counts: list[TestCount], expected_counts: dict[str, dict[str
362366

363367
print("=" * 80)
364368
print("\nTo update the test file, run:")
365-
print(f" python scripts/update_performance_test_counts.py --test-class {test_name.split('_')[0]} --update")
369+
print(" python3 scripts/update_performance_test_counts.py")
366370
print()
367371

368372

369-
def update_test_file(counts: list[TestCount]):
370-
"""Update the test file with new expected counts."""
373+
def update_test_file(counts: list[TestCount], test_class: str | None = None):
374+
"""
375+
Update the test file with new expected counts.
376+
377+
When test_class is provided, only update test methods within that class.
378+
This is required when multiple classes (e.g. v2 and v3) share the same test method names.
379+
"""
371380
if not counts:
372381
print("No counts to update.")
373382
return
@@ -419,22 +428,37 @@ def _extract_call_span(method_content: str, call_name: str) -> tuple[int, int] |
419428
"second_import_async_tasks": "expected_num_async_tasks2",
420429
}
421430

431+
# Restrict search to the specified test class if given
432+
search_content = content
433+
search_offset = 0
434+
if test_class:
435+
class_pattern = re.compile(
436+
rf"class {re.escape(test_class)}.*?(?=(?<!-)class \w|\Z)",
437+
re.DOTALL,
438+
)
439+
class_match = class_pattern.search(content)
440+
if not class_match:
441+
print(f"⚠️ Warning: Could not find test class {test_class}")
442+
return
443+
search_content = class_match.group(0)
444+
search_offset = class_match.start()
445+
422446
# Update each test method
423447
for test_name, test_updates in updates.items():
424448
print(f" Updating {test_name}...")
425-
# Find the test method boundaries
449+
# Find the test method boundaries within the search scope
426450
test_method_pattern = re.compile(
427451
rf"(def {re.escape(test_name)}\([^)]*\):.*?)(?=def test_|\Z)",
428452
re.DOTALL,
429453
)
430-
test_match = test_method_pattern.search(content)
454+
test_match = test_method_pattern.search(search_content)
431455
if not test_match:
432456
print(f"⚠️ Warning: Could not find test method {test_name}")
433457
continue
434458

435459
test_method_content = test_match.group(1)
436-
test_method_start = test_match.start()
437-
test_method_end = test_match.end()
460+
test_method_start = search_offset + test_match.start()
461+
test_method_end = search_offset + test_match.end()
438462

439463
call_span = _extract_call_span(test_method_content, "self._import_reimport_performance")
440464
param_map = param_map_import_reimport
@@ -543,12 +567,6 @@ def main():
543567
formatter_class=argparse.RawDescriptionHelpFormatter,
544568
epilog=__doc__,
545569
)
546-
parser.add_argument(
547-
"--test-class",
548-
required=False,
549-
default="TestDojoImporterPerformanceSmall",
550-
help="Test class name (e.g., TestDojoImporterPerformanceSmall). Defaults to TestDojoImporterPerformanceSmall if not provided.",
551-
)
552570
parser.add_argument(
553571
"--report-only",
554572
action="store_true",
@@ -568,96 +586,111 @@ def main():
568586
args = parser.parse_args()
569587

570588
if args.report_only:
571-
# Step 1: Run tests and generate report
572-
# Run each test method individually
573-
test_methods = extract_test_methods(args.test_class)
574-
if not test_methods:
575-
print(f"⚠️ No test methods found in {args.test_class}")
576-
sys.exit(1)
577-
578-
print(f"\nFound {len(test_methods)} test method(s) in {args.test_class}")
579-
print("=" * 80)
580-
589+
# Step 1: Run tests and generate report for both test classes
581590
all_counts = []
582-
for test_method in test_methods:
583-
print(f"\n{'=' * 80}")
584-
output, return_code = run_test_method(args.test_class, test_method)
585-
success, error_msg = check_test_execution_success(output, return_code)
586-
if not success:
587-
print(f"\n⚠️ Test execution failed for {test_method}: {error_msg}")
588-
print("Skipping this test method...")
591+
for test_class in TEST_CLASSES:
592+
test_methods = extract_test_methods(test_class)
593+
if not test_methods:
594+
print(f"⚠️ No test methods found in {test_class}")
589595
continue
590596

591-
counts = parse_test_output(output)
592-
if counts:
593-
all_counts.extend(counts)
597+
print(f"\n{'=' * 80}")
598+
print(f"Test class: {test_class} ({len(test_methods)} methods)")
599+
print("=" * 80)
600+
601+
for test_method in test_methods:
602+
print("\n---")
603+
output, return_code = run_test_method(test_class, test_method)
604+
success, error_msg = check_test_execution_success(output, return_code)
605+
if not success:
606+
print(f"\n⚠️ Test execution failed for {test_method}: {error_msg}")
607+
print("Skipping this test method...")
608+
continue
609+
610+
counts = parse_test_output(output)
611+
if counts:
612+
all_counts.extend(counts)
594613

595-
expected_counts = extract_expected_counts_from_file(args.test_class)
596-
generate_report(all_counts, expected_counts)
614+
if all_counts:
615+
generate_report(all_counts, {})
616+
else:
617+
print("✅ All tests passed! No count differences found.")
597618

598619
elif args.verify:
599-
# Step 3: Verify
600-
success = verify_tests(args.test_class)
601-
sys.exit(0 if success else 1)
620+
# Step 3: Verify both test classes
621+
all_pass = True
622+
for test_class in TEST_CLASSES:
623+
if not verify_tests(test_class):
624+
all_pass = False
625+
sys.exit(0 if all_pass else 1)
602626

603627
else:
604628
# Default: Update the file (--update is the default behavior)
605-
# Run each test method individually
606-
test_methods = extract_test_methods(args.test_class)
607-
if not test_methods:
608-
print(f"⚠️ No test methods found in {args.test_class}")
609-
sys.exit(1)
610-
611-
print(f"\nFound {len(test_methods)} test method(s) in {args.test_class}")
612-
print("=" * 80)
613-
629+
# Run each test method in both test classes
614630
all_counts = []
615-
for test_method in test_methods:
616-
print(f"\n{'=' * 80}")
617-
output, return_code = run_test_method(args.test_class, test_method)
618-
success, error_msg = check_test_execution_success(output, return_code)
619-
if not success:
620-
print(f"\n⚠️ Test execution failed for {test_method}: {error_msg}")
621-
print("Skipping this test method...")
631+
for test_class in TEST_CLASSES:
632+
test_methods = extract_test_methods(test_class)
633+
if not test_methods:
634+
print(f"⚠️ No test methods found in {test_class}")
622635
continue
623636

624-
counts = parse_test_output(output)
625-
626-
# Check if test actually passed
627-
test_passed = "OK" in output or ("Ran" in output and "FAILED" not in output and return_code == 0)
628-
629-
if counts:
630-
all_counts.extend(counts)
631-
# Update immediately after each test
632-
update_test_file(counts)
633-
print(f"⚠️ {test_method}: Found {len(counts)} count mismatch(es) - updated file")
634-
elif test_passed:
635-
print(f"✅ {test_method}: Test passed, all counts match")
636-
elif return_code != 0:
637-
# Test might have failed for other reasons
638-
print(f"⚠️ {test_method}: Test failed (exit code {return_code}) but no count mismatches parsed")
639-
print(" This might indicate a parsing issue or a different type of failure")
640-
# Show a snippet of the output to help debug
641-
fail_lines = [line for line in output.split("\n") if "FAIL" in line or "Error" in line or "Exception" in line]
642-
if fail_lines:
643-
print(" Relevant error lines:")
644-
for line in fail_lines[:5]:
645-
print(f" {line}")
637+
print(f"\n{'=' * 80}")
638+
print(f"Test class: {test_class} ({len(test_methods)} methods)")
639+
print("=" * 80)
640+
641+
for test_method in test_methods:
642+
print("\n---")
643+
output, return_code = run_test_method(test_class, test_method)
644+
success, error_msg = check_test_execution_success(output, return_code)
645+
if not success:
646+
print(f"\n⚠️ Test execution failed for {test_method}: {error_msg}")
647+
print("Skipping this test method...")
648+
continue
649+
650+
counts = parse_test_output(output)
651+
652+
# Check if test actually passed
653+
test_passed = "OK" in output or ("Ran" in output and "FAILED" not in output and return_code == 0)
654+
655+
if counts:
656+
all_counts.extend(counts)
657+
# Update immediately after each test
658+
update_test_file(counts, test_class=test_class)
659+
print(f"⚠️ {test_method}: Found {len(counts)} count mismatch(es) - updated file")
660+
elif test_passed:
661+
print(f"✅ {test_method}: Test passed, all counts match")
662+
elif return_code != 0:
663+
# Test might have failed for other reasons
664+
print(f"⚠️ {test_method}: Test failed (exit code {return_code}) but no count mismatches parsed")
665+
print(" This might indicate a parsing issue or a different type of failure")
666+
fail_lines = [line for line in output.split("\n") if "FAIL" in line or "Error" in line or "Exception" in line]
667+
if fail_lines:
668+
print(" Relevant error lines:")
669+
for line in fail_lines[:5]:
670+
print(f" {line}")
646671

647672
if all_counts:
648673
print(f"\n{'=' * 80}")
649674
print(f"✅ Updated {len(all_counts)} count(s) across {len({c.test_name for c in all_counts})} test(s)")
650675
# Some performance counts can vary depending on test ordering / keepdb state.
651676
# Do a final full-suite pass and apply any remaining mismatches so the suite passes as run in CI.
652677
print("\nRunning a final verify pass for stability...")
653-
success, suite_mismatches = verify_and_get_mismatches(args.test_class)
654-
if not success and suite_mismatches:
655-
print("\nApplying remaining mismatches from full-suite run...")
656-
update_test_file(suite_mismatches)
678+
all_pass = True
679+
for test_class in TEST_CLASSES:
680+
success, suite_mismatches = verify_and_get_mismatches(test_class)
681+
if not success and suite_mismatches:
682+
print(f"\nApplying remaining mismatches from {test_class}...")
683+
update_test_file(suite_mismatches, test_class=test_class)
684+
all_pass = False
685+
if not all_pass:
657686
print("\nRe-running verify...")
658-
success, _ = verify_and_get_mismatches(args.test_class)
659-
sys.exit(0 if success else 1)
660-
sys.exit(0 if success else 1)
687+
all_pass = True
688+
for test_class in TEST_CLASSES:
689+
success, _ = verify_and_get_mismatches(test_class)
690+
if not success:
691+
all_pass = False
692+
sys.exit(0 if all_pass else 1)
693+
sys.exit(0 if all_pass else 1)
661694
else:
662695
print(f"\n{'=' * 80}")
663696
print("\n✅ No differences found. All tests are already up to date.")

0 commit comments

Comments (0)