// Benchmarks for API request building performance
// Benchmarks are exempt from strict linting as they are test/performance code
#![allow(
    clippy::cognitive_complexity,
    clippy::doc_markdown,
    clippy::explicit_iter_loop,
    clippy::format_in_format_args,
    clippy::missing_docs_in_private_items,
    clippy::must_use_candidate,
    clippy::needless_pass_by_value,
    clippy::clone_on_copy,
    clippy::too_many_lines,
    clippy::uninlined_format_args
)]
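
// These benchmarks assume the standard Criterion setup: a `[[bench]]` target
// in Cargo.toml with `harness = false` pointing at this file (the exact bench
// name is project-specific). Run with `cargo bench`.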

use api::{
    build_chat_completion_request, flatten_tool_result_content, is_reasoning_model,
    translate_message, InputContentBlock, InputMessage, MessageRequest, OpenAiCompatConfig,
    ToolResultContentBlock,
};
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use serde_json::json;

/// Create a sample message request with various content types
fn create_sample_request(message_count: usize) -> MessageRequest {
    let mut messages = Vec::with_capacity(message_count);

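    // Cycle through four message shapes: plain user text, assistant text plus
    // a tool_use call, a matching tool_result reply, and a bare tool_use.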
    for i in 0..message_count {
        match i % 4 {
            0 => messages.push(InputMessage::user_text(format!("Message {}", i))),
            1 => messages.push(InputMessage {
                role: "assistant".to_string(),
                content: vec![
                    InputContentBlock::Text {
                        text: format!("Assistant response {}", i),
                    },
                    InputContentBlock::ToolUse {
                        id: format!("call_{}", i),
                        name: "read_file".to_string(),
                        input: json!({"path": format!("/tmp/file{}", i)}),
                    },
                ],
            }),
            2 => messages.push(InputMessage {
                role: "user".to_string(),
                content: vec![InputContentBlock::ToolResult {
                    tool_use_id: format!("call_{}", i - 1),
                    content: vec![ToolResultContentBlock::Text {
                        text: format!("Tool result content {}", i),
                    }],
                    is_error: false,
                }],
            }),
            _ => messages.push(InputMessage {
                role: "assistant".to_string(),
                content: vec![InputContentBlock::ToolUse {
                    id: format!("call_{}", i),
                    name: "write_file".to_string(),
                    input: json!({"path": format!("/tmp/out{}", i), "content": "data"}),
                }],
            }),
        }
    }

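    // Defaults below target "gpt-4o"; individual benches override `model`
    // (e.g. "o1-mini", "gpt-5") to exercise model-specific paths.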
    MessageRequest {
        model: "gpt-4o".to_string(),
        max_tokens: 1024,
        messages,
        stream: false,
        system: Some("You are a helpful assistant.".to_string()),
        temperature: Some(0.7),
        top_p: None,
        tools: None,
        tool_choice: None,
        frequency_penalty: None,
        presence_penalty: None,
        stop: None,
        reasoning_effort: None,
    }
}

/// Benchmark translate_message with various message types
fn bench_translate_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("translate_message");

    // Text-only message
    let text_message = InputMessage::user_text("Simple text message".to_string());
    group.bench_with_input(
        BenchmarkId::new("text_only", "single"),
        &text_message,
        |b, msg| {
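            // black_box prevents the compiler from constant-folding the
            // inputs or optimizing away the call under test.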
            b.iter(|| translate_message(black_box(msg), black_box("gpt-4o")));
        },
    );

    // Assistant message with tool calls
    let assistant_message = InputMessage {
        role: "assistant".to_string(),
        content: vec![
            InputContentBlock::Text {
                text: "I'll help you with that.".to_string(),
            },
            InputContentBlock::ToolUse {
                id: "call_1".to_string(),
                name: "read_file".to_string(),
                input: json!({"path": "/tmp/test"}),
            },
            InputContentBlock::ToolUse {
                id: "call_2".to_string(),
                name: "write_file".to_string(),
                input: json!({"path": "/tmp/out", "content": "data"}),
            },
        ],
    };
    group.bench_with_input(
        BenchmarkId::new("assistant_with_tools", "2_tools"),
        &assistant_message,
        |b, msg| {
            b.iter(|| translate_message(black_box(msg), black_box("gpt-4o")));
        },
    );

    // Tool result message
    let tool_result_message = InputMessage {
        role: "user".to_string(),
        content: vec![InputContentBlock::ToolResult {
            tool_use_id: "call_1".to_string(),
            content: vec![ToolResultContentBlock::Text {
                text: "File contents here".to_string(),
            }],
            is_error: false,
        }],
    };
    group.bench_with_input(
        BenchmarkId::new("tool_result", "single"),
        &tool_result_message,
        |b, msg| {
            b.iter(|| translate_message(black_box(msg), black_box("gpt-4o")));
        },
    );

    // Tool result for kimi model (is_error excluded)
    group.bench_with_input(
        BenchmarkId::new("tool_result_kimi", "kimi-k2.5"),
        &tool_result_message,
        |b, msg| {
            b.iter(|| translate_message(black_box(msg), black_box("kimi-k2.5")));
        },
    );

    // Large content message
    let large_content = "x".repeat(10000);
    let large_message = InputMessage::user_text(large_content);
    group.bench_with_input(
        BenchmarkId::new("large_text", "10kb"),
        &large_message,
        |b, msg| {
            b.iter(|| translate_message(black_box(msg), black_box("gpt-4o")));
        },
    );

    group.finish();
}

/// Benchmark build_chat_completion_request with various message counts
fn bench_build_request(c: &mut Criterion) {
    let mut group = c.benchmark_group("build_chat_completion_request");
    let config = OpenAiCompatConfig::openai();
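    // `config.clone()` runs inside the measured closures below, so the clone
    // cost is included in each sample.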

    for message_count in [10, 50, 100].iter() {
        let request = create_sample_request(*message_count);
        group.bench_with_input(
            BenchmarkId::new("message_count", message_count),
            &request,
            |b, req| {
                b.iter(|| build_chat_completion_request(black_box(req), config.clone()));
            },
        );
    }

    // Benchmark with reasoning model (tuning params stripped)
    let mut reasoning_request = create_sample_request(50);
    reasoning_request.model = "o1-mini".to_string();
    group.bench_with_input(
        BenchmarkId::new("reasoning_model", "o1-mini"),
        &reasoning_request,
        |b, req| {
            b.iter(|| build_chat_completion_request(black_box(req), config.clone()));
        },
    );

    // Benchmark with gpt-5 (max_completion_tokens)
    let mut gpt5_request = create_sample_request(50);
    gpt5_request.model = "gpt-5".to_string();
    group.bench_with_input(
        BenchmarkId::new("gpt5", "gpt-5"),
        &gpt5_request,
        |b, req| {
            b.iter(|| build_chat_completion_request(black_box(req), config.clone()));
        },
    );

    group.finish();
}

/// Benchmark flatten_tool_result_content
fn bench_flatten_tool_result(c: &mut Criterion) {
    let mut group = c.benchmark_group("flatten_tool_result_content");

    // Single text block
    let single_text = vec![ToolResultContentBlock::Text {
        text: "Simple result".to_string(),
    }];
    group.bench_with_input(
        BenchmarkId::new("single_text", "1_block"),
        &single_text,
        |b, content| {
            b.iter(|| flatten_tool_result_content(black_box(content)));
        },
    );

    // Multiple text blocks
    let multi_text: Vec<ToolResultContentBlock> = (0..10)
        .map(|i| ToolResultContentBlock::Text {
            text: format!("Line {}: some content here\n", i),
        })
        .collect();
    group.bench_with_input(
        BenchmarkId::new("multi_text", "10_blocks"),
        &multi_text,
        |b, content| {
            b.iter(|| flatten_tool_result_content(black_box(content)));
        },
    );

    // JSON content blocks
    let json_content: Vec<ToolResultContentBlock> = (0..5)
        .map(|i| ToolResultContentBlock::Json {
            value: json!({"index": i, "data": "test content", "nested": {"key": "value"}}),
        })
        .collect();
    group.bench_with_input(
        BenchmarkId::new("json_content", "5_blocks"),
        &json_content,
        |b, content| {
            b.iter(|| flatten_tool_result_content(black_box(content)));
        },
    );

    // Mixed content
    let mixed_content = vec![
        ToolResultContentBlock::Text {
            text: "Here's the result:".to_string(),
        },
        ToolResultContentBlock::Json {
            value: json!({"status": "success", "count": 42}),
        },
        ToolResultContentBlock::Text {
            text: "Processing complete.".to_string(),
        },
    ];
    group.bench_with_input(
        BenchmarkId::new("mixed_content", "text+json"),
        &mixed_content,
        |b, content| {
            b.iter(|| flatten_tool_result_content(black_box(content)));
        },
    );

    // Large content - simulating typical tool output
    let large_content: Vec<ToolResultContentBlock> = (0..50)
        .map(|i| {
            if i % 3 == 0 {
                ToolResultContentBlock::Json {
                    value: json!({"line": i, "content": "x".repeat(100)}),
                }
            } else {
                ToolResultContentBlock::Text {
                    text: format!("Line {}: {}", i, "some output content here"),
                }
            }
        })
        .collect();
    group.bench_with_input(
        BenchmarkId::new("large_content", "50_blocks"),
        &large_content,
        |b, content| {
            b.iter(|| flatten_tool_result_content(black_box(content)));
        },
    );

    group.finish();
}

/// Benchmark is_reasoning_model detection
fn bench_is_reasoning_model(c: &mut Criterion) {
    let mut group = c.benchmark_group("is_reasoning_model");

    let models = vec![
        ("gpt-4o", false),
        ("o1-mini", true),
        ("o3", true),
        ("grok-3", false),
        ("grok-3-mini", true),
        ("qwen/qwen-qwq-32b", true),
        ("qwen/qwen-plus", false),
    ];
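    // The expected flag only labels the benchmark ID; the function's return
    // value is not asserted here.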

    for (model, expected) in models {
        group.bench_with_input(
            BenchmarkId::new(model, if expected { "reasoning" } else { "normal" }),
            model,
            |b, m| {
                b.iter(|| is_reasoning_model(black_box(m)));
            },
        );
    }

    group.finish();
}

criterion_group!(
    benches,
    bench_translate_message,
    bench_build_request,
    bench_flatten_tool_result,
    bench_is_reasoning_model
);
criterion_main!(benches);