@@ -666,6 +666,7 @@ def function_callning_reply_messages(
666666 from derisk .core import ModelMessageRoleType
667667
668668 ## 历史消息
669+ has_tool_calls = llm_out and llm_out .tool_calls
669670 if llm_out :
670671 llm_content = llm_out .content or ""
671672 if llm_out .thinking_content :
@@ -681,7 +682,11 @@ def function_callning_reply_messages(
681682 }
682683 )
683684
684- if action_outs :
685+ # 只有当 assistant 消息中含有 tool_calls 时,才追加 tool 结果消息。
686+ # 若 tool_calls 为 None/空,说明 LLM 未发起工具调用,此时追加 tool 消息会
687+ # 导致 API 报错:messages with role "tool" must be a response to a
688+ # preceeding message with "tool_calls"
689+ if action_outs and has_tool_calls :
685690 ## 准备当前轮次的ToolMessage
686691 for action_out in action_outs :
687692 function_call_reply_messages .append (
@@ -691,6 +696,12 @@ def function_callning_reply_messages(
691696 "content" : action_out .content ,
692697 }
693698 )
699+ elif action_outs and not has_tool_calls :
700+ logger .warning (
701+ f"[function_callning_reply_messages] Skipping { len (action_outs )} tool result(s) "
702+ f"because the preceding assistant message has no tool_calls. "
703+ f"This prevents invalid message sequences being sent to the LLM."
704+ )
694705
695706 return function_call_reply_messages
696707
@@ -1403,6 +1414,11 @@ async def thinking(
14031414 if tool_messages :
14041415 llm_messages .extend (tool_messages )
14051416
1417+ # 过滤非法消息序列:移除没有匹配 tool_calls 的孤立 tool 角色消息
1418+ # 否则 API 会报错:messages with role "tool" must be a response
1419+ # to a preceeding message with "tool_calls"
1420+ llm_messages = _sanitize_tool_messages (llm_messages )
1421+
14061422 if not self .llm_client :
14071423 raise ValueError ("LLM client is not initialized!" )
14081424
@@ -2719,5 +2735,56 @@ def _new_system_message(content):
27192735 return [{"content" : content , "role" : ModelMessageRoleType .SYSTEM }]
27202736
27212737
def _sanitize_tool_messages(messages: List[dict]) -> List[dict]:
    """Remove orphaned 'tool' role messages from an LLM message list.

    OpenAI-compatible APIs require every ``role='tool'`` message to directly
    follow either an assistant message carrying a non-empty ``tool_calls``
    list, or another tool message in the same response group (one result per
    tool call). Sending an orphaned tool message causes a 400 error:

        "messages with role 'tool' must be a response to a preceeding
        message with 'tool_calls'."

    (The "preceeding" spelling is quoted verbatim from the API error.)

    This helper scans the list in one pass and drops any tool message that is
    not anchored to an assistant message with ``tool_calls``. Note it is not
    enough to look at the *last assistant anywhere* in the kept history: a
    tool message separated from its assistant by e.g. a user turn is still
    invalid, so we require the nearest preceding non-tool message to be the
    tool-calling assistant.

    Args:
        messages: The message dicts about to be sent to the LLM. Each dict is
            expected to have at least a "role" key; tool messages may carry a
            "tool_call_id".

    Returns:
        A new list with orphaned tool messages removed. The input list is not
        mutated. Returns the input unchanged when it is empty/None.
    """
    if not messages:
        return messages

    sanitized: List[dict] = []
    orphan_count = 0

    for msg in messages:
        if msg.get("role", "") == ModelMessageRoleType.TOOL:
            # Walk back over tool messages we have already kept (several tool
            # results may follow one assistant turn); the first non-tool
            # message we reach must be an assistant message with a non-empty
            # tool_calls list, otherwise this tool message is orphaned.
            anchor = None
            for prev in reversed(sanitized):
                if prev.get("role") == ModelMessageRoleType.TOOL:
                    continue
                anchor = prev
                break
            if (
                anchor is not None
                and anchor.get("role") == ModelMessageRoleType.AI
                and anchor.get("tool_calls")
            ):
                sanitized.append(msg)
            else:
                orphan_count += 1
                logger.warning(
                    f"[_sanitize_tool_messages] Dropped orphaned tool message "
                    f"(tool_call_id={msg.get('tool_call_id')!r}) — "
                    f"no preceding assistant message with tool_calls."
                )
        else:
            sanitized.append(msg)

    if orphan_count:
        logger.warning(
            f"[_sanitize_tool_messages] Removed {orphan_count} orphaned tool "
            f"message(s) from LLM input to prevent API 400 errors."
        )

    return sanitized
2787+
2788+
27222789def _is_list_of_type (lst : List [Any ], type_cls : type ) -> bool :
27232790 return all (isinstance (item , type_cls ) for item in lst )
0 commit comments