
Commit 00d0eb6

Yeachan-Heo and claude committed
US-024: Add token limit metadata for kimi models
Add ModelTokenLimit entries for kimi-k2.5 and kimi-k1.5 to enable preflight context window validation.

Per Moonshot AI documentation:
- Context window: 256,000 tokens
- Max output: 16,384 tokens

Includes 3 unit tests:
- returns_context_window_metadata_for_kimi_models
- kimi_alias_resolves_to_kimi_k25_token_limits
- preflight_blocks_oversized_requests_for_kimi_models

All tests pass, clippy clean.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 8d8e2c3 commit 00d0eb6

2 files changed: 87 additions & 3 deletions


prd.json

Lines changed: 18 additions & 3 deletions
@@ -329,13 +329,28 @@
       ],
       "passes": true,
       "priority": "P1"
+    },
+    {
+      "id": "US-024",
+      "title": "Add token limit metadata for kimi models",
+      "description": "The model_token_limit() function has no entries for kimi-k2.5 or kimi-k1.5, causing preflight context window validation to skip these models. Add token limit metadata to enable preflight checks and accurate max token defaults. Per Moonshot AI documentation, kimi-k2.5 supports 256K context window and 16K max output tokens.",
+      "acceptanceCriteria": [
+        "model_token_limit('kimi-k2.5') returns Some(ModelTokenLimit { max_output_tokens: 16384, context_window_tokens: 256000 })",
+        "model_token_limit('kimi-k1.5') returns appropriate limits",
+        "model_token_limit('kimi') follows alias chain (kimi → kimi-k2.5) and returns k2.5 limits",
+        "preflight_message_request() validates context window for kimi models (via generic preflight, no provider-specific code needed)",
+        "Unit tests verify limits and preflight behavior for kimi models",
+        "All tests pass and clippy is clean"
+      ],
+      "passes": true,
+      "priority": "P1"
     }
   ],
   "metadata": {
-    "lastUpdated": "2026-04-16",
-    "completedStories": ["US-001", "US-002", "US-003", "US-004", "US-005", "US-006", "US-007", "US-008", "US-009", "US-010", "US-011", "US-012", "US-013", "US-014", "US-015", "US-016", "US-017", "US-018", "US-019", "US-020", "US-021", "US-022", "US-023"],
+    "lastUpdated": "2026-04-17",
+    "completedStories": ["US-001", "US-002", "US-003", "US-004", "US-005", "US-006", "US-007", "US-008", "US-009", "US-010", "US-011", "US-012", "US-013", "US-014", "US-015", "US-016", "US-017", "US-018", "US-019", "US-020", "US-021", "US-022", "US-023", "US-024"],
     "inProgressStories": [],
-    "totalStories": 23,
+    "totalStories": 24,
     "status": "completed"
   }
 }
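
US-024's alias criterion leans on resolve_model_alias(), which this commit does not touch. As a rough illustration only, assuming the resolver is a simple table lookup (just the function name appears above; the body here is hypothetical):

/// Hypothetical sketch: only the name resolve_model_alias() appears in
/// the acceptance criteria; the lookup table below is illustrative.
fn resolve_model_alias(model: &str) -> &str {
    match model {
        // Alias chain cited in US-024: kimi -> kimi-k2.5.
        "kimi" => "kimi-k2.5",
        other => other,
    }
}

fn main() {
    assert_eq!(resolve_model_alias("kimi"), "kimi-k2.5");
    // Concrete model ids pass through unchanged.
    assert_eq!(resolve_model_alias("kimi-k1.5"), "kimi-k1.5");
}

For the alias criterion to hold, model_token_limit() presumably applies this resolution before matching, since its arms (shown in the next file) list only concrete model ids.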

rust/crates/api/src/providers/mod.rs

Lines changed: 69 additions & 0 deletions
@@ -289,6 +289,12 @@ pub fn model_token_limit(model: &str) -> Option<ModelTokenLimit> {
             max_output_tokens: 64_000,
             context_window_tokens: 131_072,
         }),
+        // Kimi models via DashScope (Moonshot AI)
+        // Source: https://platform.moonshot.cn/docs/intro
+        "kimi-k2.5" | "kimi-k1.5" => Some(ModelTokenLimit {
+            max_output_tokens: 16_384,
+            context_window_tokens: 256_000,
+        }),
         _ => None,
     }
 }
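
The preflight that consumes this metadata is not part of the diff. Below is a minimal, self-contained sketch of the generic check the acceptance criteria describe, assuming a rough 4-characters-per-token input estimate; the simplified request shape, the estimator, and the 4_096 fallback in default_max_tokens() are illustrative assumptions, not the repository's actual code:

struct ModelTokenLimit {
    max_output_tokens: u32,
    context_window_tokens: u32,
}

#[derive(Debug)]
enum ApiError {
    ContextWindowExceeded {
        model: String,
        context_window_tokens: u32,
    },
}

// Stand-in for the real request type, which carries messages, tools,
// and streaming flags as the tests below show.
struct MessageRequest {
    model: String,
    max_tokens: u32,
    input_text: String,
}

fn model_token_limit(model: &str) -> Option<ModelTokenLimit> {
    match model {
        "kimi-k2.5" | "kimi-k1.5" => Some(ModelTokenLimit {
            max_output_tokens: 16_384,
            context_window_tokens: 256_000,
        }),
        _ => None,
    }
}

// Generic preflight: driven entirely by per-model metadata, so no
// provider-specific branch is needed.
fn preflight_message_request(request: &MessageRequest) -> Result<(), ApiError> {
    let Some(limit) = model_token_limit(&request.model) else {
        return Ok(()); // no metadata: skip the guarded preflight
    };
    let estimated_input = (request.input_text.len() / 4) as u32; // assumed heuristic
    if estimated_input + request.max_tokens > limit.context_window_tokens {
        return Err(ApiError::ContextWindowExceeded {
            model: request.model.clone(),
            context_window_tokens: limit.context_window_tokens,
        });
    }
    Ok(())
}

// The PRD also mentions "accurate max token defaults"; a plausible
// (assumed) consumer of max_output_tokens:
fn default_max_tokens(model: &str) -> u32 {
    model_token_limit(model).map_or(4_096, |l| l.max_output_tokens)
}

fn main() {
    // Models without metadata skip the guarded preflight.
    let unknown = MessageRequest {
        model: "some-other-model".into(),
        max_tokens: 8,
        input_text: "hello".into(),
    };
    assert!(preflight_message_request(&unknown).is_ok());

    // Oversized kimi requests are rejected with the model's window size.
    let oversized = MessageRequest {
        model: "kimi-k2.5".into(),
        max_tokens: 16_384,
        input_text: "x".repeat(1_000_000),
    };
    println!("{:?}", preflight_message_request(&oversized).unwrap_err());

    assert_eq!(default_max_tokens("kimi-k2.5"), 16_384);
}

Because the check keys off the metadata table alone, adding the kimi match arm above is enough to activate the guard, exactly as the acceptance criteria state.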
@@ -744,6 +750,69 @@ mod tests {
             .expect("models without context metadata should skip the guarded preflight");
     }
 
+    #[test]
+    fn returns_context_window_metadata_for_kimi_models() {
+        // kimi-k2.5
+        let k25_limit = model_token_limit("kimi-k2.5")
+            .expect("kimi-k2.5 should have token limit metadata");
+        assert_eq!(k25_limit.max_output_tokens, 16_384);
+        assert_eq!(k25_limit.context_window_tokens, 256_000);
+
+        // kimi-k1.5
+        let k15_limit = model_token_limit("kimi-k1.5")
+            .expect("kimi-k1.5 should have token limit metadata");
+        assert_eq!(k15_limit.max_output_tokens, 16_384);
+        assert_eq!(k15_limit.context_window_tokens, 256_000);
+    }
+
+    #[test]
+    fn kimi_alias_resolves_to_kimi_k25_token_limits() {
+        // The "kimi" alias resolves to "kimi-k2.5" via resolve_model_alias()
+        let alias_limit = model_token_limit("kimi")
+            .expect("kimi alias should resolve to kimi-k2.5 limits");
+        let direct_limit = model_token_limit("kimi-k2.5")
+            .expect("kimi-k2.5 should have limits");
+        assert_eq!(alias_limit.max_output_tokens, direct_limit.max_output_tokens);
+        assert_eq!(
+            alias_limit.context_window_tokens,
+            direct_limit.context_window_tokens
+        );
+    }
+
+    #[test]
+    fn preflight_blocks_oversized_requests_for_kimi_models() {
+        let request = MessageRequest {
+            model: "kimi-k2.5".to_string(),
+            max_tokens: 16_384,
+            messages: vec![InputMessage {
+                role: "user".to_string(),
+                content: vec![InputContentBlock::Text {
+                    text: "x".repeat(1_000_000), // Large input to exceed context window
+                }],
+            }],
+            system: Some("Keep the answer short.".to_string()),
+            tools: None,
+            tool_choice: None,
+            stream: true,
+            ..Default::default()
+        };
+
+        let error = preflight_message_request(&request)
+            .expect_err("oversized request should be rejected for kimi models");
+
+        match error {
+            ApiError::ContextWindowExceeded {
+                model,
+                context_window_tokens,
+                ..
+            } => {
+                assert_eq!(model, "kimi-k2.5");
+                assert_eq!(context_window_tokens, 256_000);
+            }
+            other => panic!("expected context-window preflight failure, got {other:?}"),
+        }
+    }
+
     #[test]
     fn parse_dotenv_extracts_keys_handles_comments_quotes_and_export_prefix() {
         // given
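
A quick back-of-envelope check on the oversized-request test: assuming the preflight estimates roughly 4 characters per token (the ratio is an assumption; the diff only shows that the 1,000,000-character payload is rejected), the input alone nearly fills the 256K window before the requested output is counted:

fn main() {
    let estimated_input = 1_000_000 / 4; // ~250_000 tokens for the "x" payload (assumed ratio)
    let requested_output = 16_384;       // max_tokens from the test
    let window = 256_000;                // kimi-k2.5 context window from the new metadata
    // 250_000 + 16_384 = 266_384 > 256_000, so the guard must fire.
    assert!(estimated_input + requested_output > window);
}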
