From 176986f6f26375df102c891594ed86df1ffe5394 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Thu, 14 May 2026 08:59:31 -0500
Subject: [PATCH] chore: Clarify track_judge_result guidance in create-judge example

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 features/create_judge/create_judge_example.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/features/create_judge/create_judge_example.py b/features/create_judge/create_judge_example.py
index c188506..dd1b158 100644
--- a/features/create_judge/create_judge_example.py
+++ b/features/create_judge/create_judge_example.py
@@ -79,9 +79,10 @@ async def async_main():
 
     judge_result = await judge.evaluate(input_text, output_text)
 
-    # Track the judge evaluation scores on the tracker for the aiConfig you are evaluating
-    # Example:
-    #     aiConfig.create_tracker().track_judge_result(judge_result)
+    # If the output you're judging came from another AI Config, track the
+    # result on that config's tracker so the metric is attributed to the
+    # right config:
+    #     ai_config.create_tracker().track_judge_result(judge_result)
 
     print("\nJudge result:")
     print(f"- judge_config_key: {judge_key}")
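
For context, a minimal runnable sketch of the flow the revised comment describes.
The Judge, Tracker, and AIConfig classes below are hand-rolled stand-ins, not the
real SDK objects constructed in create_judge_example.py; only the two calls
`await judge.evaluate(input_text, output_text)` and
`ai_config.create_tracker().track_judge_result(judge_result)` mirror the patched
file, and the sample texts are made up.

import asyncio


class Judge:
    """Stand-in for the SDK judge object used in the example."""

    async def evaluate(self, input_text: str, output_text: str) -> dict:
        # A real judge would call a model to score the output; this stub
        # returns a fixed score so the sketch runs end to end.
        return {"relevance": 0.9}


class Tracker:
    """Stand-in for the tracker returned by ai_config.create_tracker()."""

    def track_judge_result(self, judge_result: dict) -> None:
        # A real tracker would record the scores as metrics on the AI Config
        # being evaluated; here we just print them.
        print(f"tracked judge result: {judge_result}")


class AIConfig:
    """Stand-in for the AI Config whose output is being judged."""

    def create_tracker(self) -> Tracker:
        return Tracker()


async def async_main():
    judge = Judge()
    ai_config = AIConfig()  # the config that produced output_text

    input_text = "What is the capital of France?"    # made-up sample
    output_text = "Paris is the capital of France."  # made-up sample

    judge_result = await judge.evaluate(input_text, output_text)

    # Attribute the judge's scores to the config that produced the output,
    # per the comment added in the patch above.
    ai_config.create_tracker().track_judge_result(judge_result)


asyncio.run(async_main())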