Skip to content

Commit 8709e0b

Browse files
test(usage)+harden: model_name handoff coverage, serialize round-trip, defensive _get_model_name
Three small follow-ups from the CodeRabbit review of PR #4: * test_multi_agent_run_attributes_usage_to_correct_agents now sets a distinct model attribute on each FakeModel and asserts model_name on both the triage and specialist RequestUsage entries, so a regression that drops or cross-wires model_name during handoff aggregation will surface here too. * Adds test_serialize_deserialize_roundtrip_preserves_agent_and_model_names that builds a Usage with both a named and an unnamed RequestUsage entry, runs serialize_usage then deserialize_usage, and asserts both fields survive on the named entry while staying None on the unnamed one. This exercises both branches of the conditional emit in serialize_usage. * Hardens _get_model_name against custom Model implementations whose descriptors or properties raise on attribute access: the helper now swallows exceptions, falls back to model_name when model is missing, and rejects empty strings. This keeps usage accounting from ever crashing the run on a third-party Model.
1 parent 09c9bbb commit 8709e0b

2 files changed

Lines changed: 75 additions & 6 deletions

File tree

src/agents/run_internal/run_loop.py

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -265,13 +265,23 @@ def _get_model_name(model: Any) -> str | None:
265265
266266
Most built-in model implementations (``OpenAIResponsesModel``,
267267
``OpenAIChatCompletionsModel``) expose a ``model`` attribute that contains
268-
the underlying model name string. This helper retrieves it in a
269-
forward-compatible, duck-typed way so that third-party model implementations
270-
that may not have this attribute are handled gracefully.
268+
the underlying model name string. This helper retrieves it in a
269+
forward-compatible, duck-typed way so that third-party model
270+
implementations are handled gracefully.
271+
272+
The function tries ``model`` first, then ``model_name`` for
273+
implementations that prefer that name. Any exception raised while
274+
resolving the attribute (for example, a custom descriptor or property
275+
that throws) is swallowed so that usage accounting can never crash the
276+
run, and ``None`` is returned when no string-valued name is available.
271277
"""
272-
model_name = getattr(model, "model", None)
273-
if isinstance(model_name, str):
274-
return model_name
278+
for attr in ("model", "model_name"):
279+
try:
280+
value = getattr(model, attr, None)
281+
except Exception:
282+
continue
283+
if isinstance(value, str) and value:
284+
return value
275285
return None
276286

277287

tests/test_usage.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -397,6 +397,56 @@ def test_request_usage_default_agent_model_names_are_none():
397397
assert entry.model_name is None
398398

399399

400+
def test_serialize_deserialize_roundtrip_preserves_agent_and_model_names():
401+
"""JSON round-trip must preserve agent_name and model_name on each entry.
402+
403+
This guards against a regression where serialize_usage drops the new
404+
attribution fields, or deserialize_usage forgets to read them back.
405+
Both branches of the conditional emit (entry-with-name and entry-without-name)
406+
are exercised so the all-None fast path can't silently strip the keys.
407+
"""
408+
from agents.usage import deserialize_usage, serialize_usage
409+
410+
named_entry = RequestUsage(
411+
input_tokens=10,
412+
output_tokens=5,
413+
total_tokens=15,
414+
input_tokens_details=InputTokensDetails(cached_tokens=0),
415+
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
416+
agent_name="Math Tutor",
417+
model_name="gpt-4o",
418+
)
419+
unnamed_entry = RequestUsage(
420+
input_tokens=2,
421+
output_tokens=1,
422+
total_tokens=3,
423+
input_tokens_details=InputTokensDetails(cached_tokens=0),
424+
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
425+
)
426+
original = Usage(
427+
requests=2,
428+
input_tokens=12,
429+
output_tokens=6,
430+
total_tokens=18,
431+
request_usage_entries=[named_entry, unnamed_entry],
432+
)
433+
434+
restored = deserialize_usage(serialize_usage(original))
435+
436+
assert len(restored.request_usage_entries) == 2
437+
restored_named = restored.request_usage_entries[0]
438+
restored_unnamed = restored.request_usage_entries[1]
439+
440+
assert restored_named.agent_name == "Math Tutor"
441+
assert restored_named.model_name == "gpt-4o"
442+
assert restored_named.input_tokens == 10
443+
assert restored_named.output_tokens == 5
444+
445+
assert restored_unnamed.agent_name is None
446+
assert restored_unnamed.model_name is None
447+
assert restored_unnamed.input_tokens == 2
448+
449+
400450
def test_request_usage_with_agent_and_model_names():
401451
"""RequestUsage can be created with explicit agent_name and model_name."""
402452
entry = RequestUsage(
@@ -584,10 +634,12 @@ async def test_multi_agent_run_attributes_usage_to_correct_agents():
584634
)
585635

586636
specialist_model = FakeModel(initial_output=[get_text_message("specialist done")])
637+
specialist_model.model = "gpt-4o-specialist" # type: ignore[attr-defined]
587638
specialist_model.set_hardcoded_usage(specialist_usage)
588639
specialist_agent = Agent(name="Specialist Agent", model=specialist_model)
589640

590641
triage_model = FakeModel()
642+
triage_model.model = "gpt-4o-triage" # type: ignore[attr-defined]
591643
triage_model.add_multiple_turn_outputs(
592644
[
593645
[get_handoff_tool_call(specialist_agent)],
@@ -607,9 +659,16 @@ async def test_multi_agent_run_attributes_usage_to_correct_agents():
607659

608660
triage_entry = next(e for e in all_entries if e.agent_name == "Triage Agent")
609661
assert triage_entry.input_tokens == 100
662+
assert triage_entry.model_name == "gpt-4o-triage", (
663+
f"Triage entry model_name should be 'gpt-4o-triage', got {triage_entry.model_name!r}"
664+
)
610665

611666
specialist_entry = next(e for e in all_entries if e.agent_name == "Specialist Agent")
612667
assert specialist_entry.input_tokens == 200
668+
assert specialist_entry.model_name == "gpt-4o-specialist", (
669+
"Specialist entry model_name should be 'gpt-4o-specialist', "
670+
f"got {specialist_entry.model_name!r}"
671+
)
613672

614673

615674
def test_add_does_not_mutate_other_entries() -> None:

0 commit comments

Comments (0)