feat: Add curated expert occupations with local data sources

- Add curated occupations seed files (210 entries in zh/en) with specific domains
- Add DBpedia occupations data (2164 entries) for external source option
- Refactor expert_source_service to read from local JSON files
- Improve keyword generation prompts to leverage expert domain context
- Add architecture analysis documentation (ARCHITECTURE_ANALYSIS.md)
- Fix expert source selection bug (proper handling of empty custom_experts)
- Update frontend to support curated/dbpedia/wikidata expert sources

Key changes:

- backend/app/data/: Local occupation data files
- backend/app/services/expert_source_service.py: Simplified local file reading
- backend/app/prompts/expert_transformation_prompt.py: Better domain-aware prompts
- Removed expert_cache.py (no longer needed with local files)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
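
The refactor described above turns `expert_source_service` into a plain synchronous reader of JSON files under `backend/app/data/` (the diff below correspondingly drops the `await` on `get_experts`). A minimal sketch of what such a reader could look like, assuming a hypothetical `{source}_occupations_{language}.json` naming scheme, a curated fallback, and random sampling, none of which is confirmed by this commit:

```python
# Hypothetical sketch only: file names, directory layout, and the sampling
# strategy are assumptions, not taken from the actual commit.
import json
import random
from pathlib import Path
from typing import List, Tuple

DATA_DIR = Path(__file__).parent.parent / "data"  # e.g. backend/app/data/


def get_experts(source: str, count: int, language: str = "zh") -> Tuple[List[dict], str]:
    """Read occupations from a local JSON file and sample `count` of them.

    Falls back to the curated list when the requested source file is missing.
    Returns (experts, actual_source) so callers can see which source was used.
    """
    path = DATA_DIR / f"{source}_occupations_{language}.json"
    if not path.exists():
        source = "curated"
        path = DATA_DIR / f"curated_occupations_{language}.json"

    with path.open(encoding="utf-8") as f:
        occupations = json.load(f)  # assumed: a list of occupation dicts

    return random.sample(occupations, min(count, len(occupations))), source
```

Because nothing here awaits I/O on a network service, the call site in the diff below no longer needs `await`.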
@@ -37,16 +37,29 @@ async def generate_expert_transformation_events(
     model = request.model
 
     # ========== Step 0: Generate expert team ==========
+    logger.info(f"[DEBUG] expert_source from request: {request.expert_source}")
+    logger.info(f"[DEBUG] expert_source value: {request.expert_source.value}")
+    logger.info(f"[DEBUG] custom_experts: {request.custom_experts}")
+
     yield f"event: expert_start\ndata: {json.dumps({'message': '正在組建專家團隊...', 'source': request.expert_source.value}, ensure_ascii=False)}\n\n"
 
     experts: List[ExpertProfile] = []
     actual_source = request.expert_source.value
 
+    # Filter down to custom experts that actually have content (drop empty strings)
+    actual_custom_experts = [
+        e.strip() for e in (request.custom_experts or [])
+        if e and e.strip()
+    ]
+    logger.info(f"[DEBUG] actual_custom_experts (filtered): {actual_custom_experts}")
+
     # Decide which source to use for generating experts
+    # Use the LLM only when it is explicitly selected or real custom experts exist
     use_llm = (
         request.expert_source == ExpertSource.LLM or
-        request.custom_experts  # custom experts present: use the LLM to fill out the team
+        len(actual_custom_experts) > 0  # real custom experts present: use the LLM to fill out the team
     )
+    logger.info(f"[DEBUG] use_llm decision: {use_llm}")
 
     if use_llm:
         # Generate experts with the LLM
@@ -55,7 +68,7 @@ async def generate_expert_transformation_events(
             query=request.query,
             categories=all_categories,
             expert_count=request.expert_count,
-            custom_experts=request.custom_experts
+            custom_experts=actual_custom_experts if actual_custom_experts else None
         )
         logger.info(f"Expert prompt: {expert_prompt[:200]}")
 
@@ -78,9 +91,9 @@ async def generate_expert_transformation_events(
             yield f"event: error\ndata: {json.dumps({'error': f'專家團隊生成失敗: {str(e)}'}, ensure_ascii=False)}\n\n"
             return
     else:
-        # Generate experts from an external source
+        # Generate experts from an external source (local files, synchronous)
         try:
-            experts_data, actual_source = await expert_source_service.get_experts(
+            experts_data, actual_source = expert_source_service.get_experts(
                 source=request.expert_source.value,
                 count=request.expert_count,
                 language=request.expert_language
@@ -106,7 +119,7 @@ async def generate_expert_transformation_events(
                 query=request.query,
                 categories=all_categories,
                 expert_count=request.expert_count,
-                custom_experts=request.custom_experts
+                custom_experts=actual_custom_experts if actual_custom_experts else None
             )
 
             expert_response = await ollama_provider.generate(
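
The `custom_experts` fix in the hunks above hinges on Python truthiness: a list containing only empty strings (e.g. `[""]`) is still truthy, so the old `use_llm` check treated it as "custom experts provided" and routed expert generation to the LLM. A standalone sketch of the fixed decision logic; the wrapper function name and the literal source strings are illustrative assumptions, not part of the commit:

```python
def decide_use_llm(expert_source: str, custom_experts: list[str] | None) -> bool:
    """Mirror of the fixed decision: only an explicit 'llm' source or real,
    non-blank custom experts should route expert generation to the LLM."""
    actual_custom_experts = [
        e.strip() for e in (custom_experts or [])
        if e and e.strip()
    ]
    return expert_source == "llm" or len(actual_custom_experts) > 0


# The old check (bool of the raw list) would have returned True for [""]:
assert decide_use_llm("curated", [""]) is False            # fixed behaviour
assert decide_use_llm("curated", ["climate scientist"]) is True
assert decide_use_llm("llm", None) is True
```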