Commit 3194987

niiish32x, 越鸿, and claude authored

feat: Agent user identity recognition + RBAC login fix + model config sync (#193)

Co-authored-by: 越鸿 <nishenghao.nsh@oceanbase.com>
Co-authored-by: Claude Opus 4.7 <noreply@anthropic.com>
1 parent d0e8d67 commit 3194987

27 files changed

Lines changed: 1376 additions & 172 deletions

.gitignore

Lines changed: 3 additions & 0 deletions
@@ -199,3 +199,6 @@ configs/local
 /pilot/datasets/
 /.claude/
 /pilot/dataset/
+/openspec/
+/configs/derisk-local.toml
+/CLAUDE.md

assets/schema/derisk.sql

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ use derisk;
 -- MySQL DDL Script for Derisk
 -- Version: 0.3.0
 -- Generated from SQLAlchemy ORM Models
--- Generated: 2026-03-30 22:06:22
+-- Generated: 2026-04-25 20:46:07
 -- ============================================================
 
 SET NAMES utf8mb4;

configs/derisk-ob.toml

Lines changed: 32 additions & 3 deletions
@@ -31,16 +31,36 @@ provider = "openai"
 api_base = "https://dashscope.aliyuncs.com/compatible-mode/v1"
 api_key = "${DASHSCOPE_API_KEY:-sk-...}"
 
+[[agent.llm.provider.model]]
+name = "deepseek-v3"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "deepseek-v3.1"
+temperature = 0.7
+max_new_tokens = 4096
 [[agent.llm.provider.model]]
 name = "deepseek-r1"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "deepseek-v3"
+name = "kimi-k2.5"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "kimi-k2.6"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "qwen3.5-plus"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "qwen3.5-flash"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "Kimi-k2"
+name = "qwen3-max"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]

@@ -51,7 +71,16 @@ max_new_tokens = 4096
 name = "qwen-vl-max"
 temperature = 0.7
 max_new_tokens = 4096
-
+is_multimodal = true
+[[agent.llm.provider.model]]
+name = "qwen3-vl-plus"
+temperature = 0.7
+max_new_tokens = 4096
+is_multimodal = true
+[[agent.llm.provider.model]]
+name = "qwq-plus"
+temperature = 0.7
+max_new_tokens = 4096
 [[agent.llm.provider.model]]
 name = "glm-5"
 temperature = 0.7
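The [[agent.llm.provider.model]] entries above are TOML arrays of tables nested under [agent.llm]: each [[agent.llm.provider]] table carries api_base/api_key plus its own list of model tables. A minimal sketch of how the parsed structure looks, assuming Python 3.11+ for the standard-library tomllib and that the script runs from the repository root:

# Sketch: inspect the agent.llm provider/model nesting that the new sync code consumes.
# Assumes Python 3.11+ (tomllib) and that configs/derisk-ob.toml is readable from the CWD.
import tomllib

with open("configs/derisk-ob.toml", "rb") as f:
    cfg = tomllib.load(f)

agent_llm = cfg["agent"]["llm"]                # {"provider": [...], ...}
for provider in agent_llm["provider"]:         # each [[agent.llm.provider]] table
    print(provider["provider"], provider["api_base"])
    for model in provider.get("model", []):    # each [[agent.llm.provider.model]] table
        print("  ", model["name"], model.get("is_multimodal", False))

This is the same {"provider": [{..., "model": [...]}]} shape that _convert_toml_agent_llm_to_json_format in app.py later renames to "providers"/"models".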

configs/derisk-proxy-aliyun.toml

Lines changed: 32 additions & 5 deletions
@@ -31,16 +31,36 @@ provider = "openai"
 api_base = "https://dashscope.aliyuncs.com/compatible-mode/v1"
 api_key = "${DASHSCOPE_API_KEY_2:-sk-...}"
 
+[[agent.llm.provider.model]]
+name = "deepseek-v3"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "deepseek-v3.1"
+temperature = 0.7
+max_new_tokens = 4096
 [[agent.llm.provider.model]]
 name = "deepseek-r1"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "deepseek-v3"
+name = "kimi-k2.5"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "kimi-k2.6"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "qwen3.5-plus"
+temperature = 0.7
+max_new_tokens = 4096
+[[agent.llm.provider.model]]
+name = "qwen3.5-flash"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "Kimi-k2"
+name = "qwen3-max"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]

@@ -51,13 +71,20 @@ max_new_tokens = 4096
 name = "qwen-vl-max"
 temperature = 0.7
 max_new_tokens = 4096
-is_multimodal = true # 多模态模型,支持图片输入
-
+is_multimodal = true
+[[agent.llm.provider.model]]
+name = "qwen3-vl-plus"
+temperature = 0.7
+max_new_tokens = 4096
+is_multimodal = true
+[[agent.llm.provider.model]]
+name = "qwq-plus"
+temperature = 0.7
+max_new_tokens = 4096
 [[agent.llm.provider.model]]
 name = "glm-5"
 temperature = 0.7
 max_new_tokens = 4096
-is_multimodal = true # 多模态模型,支持图片输入
 
 [[serves]]
 type = "file"
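Both DashScope configs read their keys through shell-style placeholders such as api_key = "${DASHSCOPE_API_KEY_2:-sk-...}", i.e. use the environment variable and fall back to the literal default when it is unset. How derisk's own loader expands these is not shown in this diff, so the following is only a sketch of the conventional ${VAR:-default} semantics:

# Hedged sketch of ${VAR:-default} expansion for config values.
# POSIX-shell-like semantics; whether derisk's loader matches this exactly is an assumption.
import os
import re

_PLACEHOLDER = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-(.*?))?\}")

def expand_placeholder(value: str) -> str:
    """Replace ${VAR} / ${VAR:-default} with the env value or the default."""
    def _sub(m: re.Match) -> str:
        env = os.environ.get(m.group(1))
        return env if env else (m.group(2) or "")
    return _PLACEHOLDER.sub(_sub, value)

# Falls back to the literal "sk-..." when DASHSCOPE_API_KEY_2 is unset or empty.
print(expand_placeholder("${DASHSCOPE_API_KEY_2:-sk-...}"))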

configs/derisk-proxy-openai.toml

Lines changed: 4 additions & 7 deletions
@@ -40,24 +40,21 @@ name = "deepseek-v3"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "Kimi-k2"
+name = "gpt-4o"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "qwen-plus"
+name = "gpt-4o-mini"
 temperature = 0.7
 max_new_tokens = 4096
 [[agent.llm.provider.model]]
-name = "qwen-vl-max"
+name = "o1"
 temperature = 0.7
 max_new_tokens = 4096
-is_multimodal = true # 多模态模型,支持图片输入
-
 [[agent.llm.provider.model]]
-name = "glm-5"
+name = "o3-mini"
 temperature = 0.7
 max_new_tokens = 4096
-is_multimodal = true # 多模态模型,支持图片输入
 
 [[serves]]
 type = "file"

packages/derisk-app/pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ dependencies = [
     "aiofiles",
     "pyparsing",
     "aiosqlite",
+    "bcrypt",
 ]
 
 [project.urls]
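The new bcrypt dependency backs the password hashing used by the RBAC login fix named in the commit title; the actual call sites live in derisk's auth code and are not part of this diff. A self-contained sketch of the hash-and-verify flow the library provides:

# Hedged sketch of bcrypt password hashing and verification (illustrative only;
# how derisk's RBAC login code wires this up is not shown in this commit).
import bcrypt

def hash_password(plain: str) -> bytes:
    """Hash a password with a per-password random salt."""
    return bcrypt.hashpw(plain.encode("utf-8"), bcrypt.gensalt())

def verify_password(plain: str, hashed: bytes) -> bool:
    """Check a password against its stored bcrypt hash."""
    return bcrypt.checkpw(plain.encode("utf-8"), hashed)

stored = hash_password("s3cret")
assert verify_password("s3cret", stored)
assert not verify_password("wrong", stored)

Storing only the bcrypt hash (never the plaintext) is the point: gensalt() embeds a random salt and a cost factor into the hash itself, so verification needs no separate salt column.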

packages/derisk-app/src/derisk_app/app.py

Lines changed: 132 additions & 5 deletions
@@ -1,6 +1,6 @@
 import logging
 import os
-from typing import Optional
+from typing import Any, Dict, Optional
 
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware

@@ -252,6 +252,7 @@ def _sync_oauth2_config_from_db():
             enabled=db_oauth2.get("enabled", False),
             providers=db_oauth2.get("providers", []),
             admin_users=db_oauth2.get("admin_users", []),
+            default_role=db_oauth2.get("default_role", "viewer"),
         )
         cfg.oauth2 = oauth2_config
         logger.info(

@@ -263,12 +264,123 @@ def _sync_oauth2_config_from_db():
         logger.warning(f"Failed to sync OAuth2 from database: {e}")
 
 
+def _convert_toml_agent_llm_to_json_format(toml_agent_llm: Dict[str, Any]) -> Dict[str, Any]:
+    """Convert TOML agent.llm format to JSON agent_llm format.
+
+    TOML format (agent.llm):
+        { "temperature": 0.5, "provider": [{ "provider": "openai", "api_base": "...", "model": [...] }] }
+
+    JSON format (agent_llm):
+        { "temperature": 0.5, "providers": [{ "provider": "openai", "api_base": "...", "models": [...] }] }
+    """
+    result = dict(toml_agent_llm)
+    providers = result.pop("provider", [])
+    json_providers = []
+    for p in providers:
+        if not isinstance(p, dict):
+            continue
+        converted = dict(p)
+        if "model" in converted:
+            converted["models"] = converted.pop("model")
+        json_providers.append(converted)
+    result["providers"] = json_providers
+    return result
+
+
+def _bootstrap_toml_providers_to_json(
+    toml_llm: Dict[str, Any], cfg: Any
+) -> bool:
+    """Sync TOML providers into derisk.json config.
+
+    TOML is the source of truth for model configuration. When TOML defines
+    providers, they are always synced into derisk.json so the Web UI and
+    ModelConfigCache see the latest models.
+
+    Returns True if providers were updated, False otherwise.
+    """
+    if not toml_llm or not toml_llm.get("provider"):
+        return False
+
+    from derisk_core.config import ConfigManager
+    from derisk_core.config.schema import (
+        AgentLLMConfig,
+        LLMProviderConfig,
+        LLMProviderModelConfig,
+    )
+
+    json_llm_dict = _convert_toml_agent_llm_to_json_format(toml_llm)
+    toml_providers = json_llm_dict.get("providers", [])
+    if not toml_providers:
+        return False
+
+    new_providers = []
+    for p_dict in toml_providers:
+        models = [
+            LLMProviderModelConfig(
+                name=m.get("name", ""),
+                temperature=m.get("temperature", 0.7),
+                max_new_tokens=m.get("max_new_tokens", 4096),
+                is_multimodal=m.get("is_multimodal", False),
+            )
+            for m in p_dict.get("models", [])
+            if isinstance(m, dict) and m.get("name")
+        ]
+        if models:
+            new_providers.append(
+                LLMProviderConfig(
+                    provider=p_dict.get("provider", "openai"),
+                    api_base=p_dict.get("api_base", ""),
+                    api_key_ref=p_dict.get("api_key_ref", ""),
+                    models=models,
+                )
+            )
+
+    if not new_providers:
+        return False
+
+    # Migrate api_key from TOML into encrypted secrets, set api_key_ref
+    for p_dict in toml_providers:
+        api_key = p_dict.get("api_key", "")
+        if api_key:
+            provider_name = p_dict.get("provider", "openai")
+            try:
+                from derisk_core.config.encryption import save_secrets, load_secrets
+
+                secrets = load_secrets() or {}
+                secret_key = f"{provider_name}_api_key"
+                secrets[secret_key] = api_key
+                save_secrets(secrets)
+
+                for p in new_providers:
+                    if p.provider == provider_name:
+                        p.api_key_ref = f"${{secrets.{secret_key}}}"
+                logger.info(f"API key for {provider_name} saved to secrets")
+            except Exception as e:
+                logger.warning(f"Failed to save API key to secrets: {e}")
+
+    cfg.agent_llm = AgentLLMConfig(
+        temperature=json_llm_dict.get("temperature", 0.5),
+        providers=new_providers,
+    )
+    ConfigManager.save()
+
+    model_names = []
+    for p in new_providers:
+        model_names.extend(m.name for m in p.models)
+    logger.info(
+        f"TOML providers synced to derisk.json: "
+        f"{len(new_providers)} providers, {len(model_names)} models: {model_names}"
+    )
+    return True
+
+
 def _sync_app_config_to_system_app():
-    """Sync JSON config (agent_llm, default_model, etc.) to system_app.config on startup.
+    """Sync LLM config to system_app.config on startup.
 
-    This ensures that after restart, the LLM configuration saved in derisk.json
-    is properly loaded into system_app.config and ModelConfigCache, making models
-    available immediately without needing manual refresh.
+    Priority: TOML config > JSON config. If TOML defines providers, they always
+    override the JSON (derisk.json) providers. This ensures edits to TOML files
+    take effect on next restart, while the Web UI can still hot-reload within a
+    session (Web UI writes to derisk.json, which takes effect until next restart).
     """
     try:
         from derisk_core.config import ConfigManager

@@ -295,10 +407,25 @@ def _sync_app_config_to_system_app():
             _convert_agent_llm_to_system_format,
         )
 
+        # TOML config is the source of truth — always sync to derisk.json
+        toml_agent = system_app.config.get("agent", None)
+        toml_llm = toml_agent.get("llm", {}) if isinstance(toml_agent, dict) else None
+        if toml_llm and toml_llm.get("provider"):
+            _bootstrap_toml_providers_to_json(toml_llm, cfg)
+            # Refresh after sync
+            agent_llm_conf = cfg.agent_llm
+
         agent_llm_dict = _convert_agent_llm_to_system_format(agent_llm_conf)
 
         system_app.config.set("agent.llm", agent_llm_dict)
 
+        # Also update app_config so model list endpoints see the latest providers.
+        # The endpoints check app_config.agent_llm first (PRIORITY 1), so it must
+        # be kept in sync; otherwise stale data blocks the fallback paths.
+        app_config = system_app.config.configs.get("app_config")
+        if app_config and hasattr(app_config, "agent_llm"):
+            app_config.agent_llm = agent_llm_conf
+
         model_configs = parse_provider_configs(agent_llm_dict)
         if model_configs:
             ModelConfigCache.register_configs(model_configs)