feat: Add external expert sources (Wikidata SPARQL + ConceptNet API)
- Add expert_cache.py: TTL-based in-memory cache (1 hour default; sketched below)
- Add expert_source_service.py: WikidataProvider and ConceptNetProvider (sketched below)
- Wikidata SPARQL queries for occupations with Chinese labels
- ConceptNet API queries for occupation-related concepts
- Random selection from cached pool
- Update schemas.py: Add ExpertSource enum (llm/wikidata/conceptnet; sketched below)
- Update ExpertTransformationRequest with expert_source and expert_language
- Update router: Conditionally use external sources with LLM fallback
- New SSE events: expert_source, expert_fallback
- Update frontend types with ExpertSource

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
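The commit message describes expert_cache.py only at this level of detail. A minimal sketch of a TTL-based in-memory cache with a one-hour default might look like the following; the class name, method names, and use of `time.monotonic()` are illustrative assumptions, not the actual implementation:

```python
# Hypothetical sketch of a TTL-based in-memory cache like the one
# expert_cache.py is described as providing; all names are illustrative.
import time
from typing import Any, Optional


class TTLCache:
    def __init__(self, ttl_seconds: float = 3600):  # 1 hour default, per the commit message
        self.ttl = ttl_seconds
        self._store: dict[str, tuple[float, Any]] = {}

    def get(self, key: str) -> Optional[Any]:
        entry = self._store.get(key)
        if entry is None:
            return None
        stored_at, value = entry
        if time.monotonic() - stored_at > self.ttl:
            # Entry expired: drop it and report a miss.
            del self._store[key]
            return None
        return value

    def set(self, key: str, value: Any) -> None:
        self._store[key] = (time.monotonic(), value)
```

Using a monotonic clock keeps entries from expiring early or late if the wall clock is adjusted while values are cached.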
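schemas.py gains an ExpertSource enum with the three values named above. Assuming a str-valued enum (which keeps `.value` usable as in the router diff below and serializes cleanly through Pydantic; the member names are an assumption), it would look roughly like:

```python
from enum import Enum


class ExpertSource(str, Enum):
    """Where expert profiles come from; values taken from the commit message."""
    LLM = "llm"
    WIKIDATA = "wikidata"
    CONCEPTNET = "conceptnet"
```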
```diff
@@ -13,6 +13,7 @@ from ..models.schemas import (
     ExpertKeyword,
     ExpertTransformationCategoryResult,
     ExpertTransformationDescription,
+    ExpertSource,
 )
 from ..prompts.expert_transformation_prompt import (
     get_expert_generation_prompt,
@@ -20,6 +21,7 @@ from ..prompts.expert_transformation_prompt import (
     get_single_description_prompt,
 )
 from ..services.llm_service import ollama_provider, extract_json_from_response
+from ..services.expert_source_service import expert_source_service
 
 logger = logging.getLogger(__name__)
 router = APIRouter(prefix="/api/expert-transformation", tags=["expert-transformation"])
@@ -35,36 +37,98 @@ async def generate_expert_transformation_events(
     model = request.model
 
     # ========== Step 0: Generate expert team ==========
-    yield f"event: expert_start\ndata: {json.dumps({'message': '正在組建專家團隊...'}, ensure_ascii=False)}\n\n"
+    yield f"event: expert_start\ndata: {json.dumps({'message': '正在組建專家團隊...', 'source': request.expert_source.value}, ensure_ascii=False)}\n\n"
 
     experts: List[ExpertProfile] = []
+    actual_source = request.expert_source.value
 
-    try:
-        expert_prompt = get_expert_generation_prompt(
-            query=request.query,
-            categories=all_categories,
-            expert_count=request.expert_count,
-            custom_experts=request.custom_experts
-        )
-        logger.info(f"Expert prompt: {expert_prompt[:200]}")
+    # Decide which source generates the experts
+    use_llm = (
+        request.expert_source == ExpertSource.LLM or
+        request.custom_experts  # with custom experts, use the LLM to fill out the team
+    )
 
-        expert_response = await ollama_provider.generate(
-            expert_prompt, model=model, temperature=temperature
-        )
-        logger.info(f"Expert response: {expert_response[:500]}")
+    if use_llm:
+        # Generate experts with the LLM
+        try:
+            expert_prompt = get_expert_generation_prompt(
+                query=request.query,
+                categories=all_categories,
+                expert_count=request.expert_count,
+                custom_experts=request.custom_experts
+            )
+            logger.info(f"Expert prompt: {expert_prompt[:200]}")
 
-        expert_data = extract_json_from_response(expert_response)
-        experts_raw = expert_data.get("experts", [])
+            expert_response = await ollama_provider.generate(
+                expert_prompt, model=model, temperature=temperature
+            )
+            logger.info(f"Expert response: {expert_response[:500]}")
 
-        for exp in experts_raw:
-            if isinstance(exp, dict) and all(k in exp for k in ["id", "name", "domain"]):
-                experts.append(ExpertProfile(**exp))
+            expert_data = extract_json_from_response(expert_response)
+            experts_raw = expert_data.get("experts", [])
 
-    except Exception as e:
-        logger.error(f"Failed to generate experts: {e}")
-        yield f"event: error\ndata: {json.dumps({'error': f'專家團隊生成失敗: {str(e)}'}, ensure_ascii=False)}\n\n"
-        return
+            for exp in experts_raw:
+                if isinstance(exp, dict) and all(k in exp for k in ["id", "name", "domain"]):
+                    experts.append(ExpertProfile(**exp))
+
+            actual_source = "llm"
+
+        except Exception as e:
+            logger.error(f"Failed to generate experts via LLM: {e}")
+            yield f"event: error\ndata: {json.dumps({'error': f'專家團隊生成失敗: {str(e)}'}, ensure_ascii=False)}\n\n"
+            return
+    else:
+        # Generate experts from an external source
+        try:
+            experts_data, actual_source = await expert_source_service.get_experts(
+                source=request.expert_source.value,
+                count=request.expert_count,
+                language=request.expert_language
+            )
+
+            for i, exp_data in enumerate(experts_data):
+                experts.append(ExpertProfile(
+                    id=f"expert-{i}",
+                    name=exp_data["name"],
+                    domain=exp_data["domain"],
+                    perspective=f"從{exp_data['domain']}角度思考"
+                ))
+
+            logger.info(f"Generated {len(experts)} experts from {actual_source}")
+
+        except Exception as e:
+            # External source failed; fall back to the LLM
+            logger.warning(f"External source failed: {e}, falling back to LLM")
+            yield f"event: expert_fallback\ndata: {json.dumps({'original': request.expert_source.value, 'fallback': 'llm', 'reason': str(e)}, ensure_ascii=False)}\n\n"
+
+            try:
+                expert_prompt = get_expert_generation_prompt(
+                    query=request.query,
+                    categories=all_categories,
+                    expert_count=request.expert_count,
+                    custom_experts=request.custom_experts
+                )
+
+                expert_response = await ollama_provider.generate(
+                    expert_prompt, model=model, temperature=temperature
+                )
+
+                expert_data = extract_json_from_response(expert_response)
+                experts_raw = expert_data.get("experts", [])
+
+                for exp in experts_raw:
+                    if isinstance(exp, dict) and all(k in exp for k in ["id", "name", "domain"]):
+                        experts.append(ExpertProfile(**exp))
+
+                actual_source = "llm"
+
+            except Exception as llm_error:
+                logger.error(f"LLM fallback also failed: {llm_error}")
+                yield f"event: error\ndata: {json.dumps({'error': f'專家團隊生成失敗: {str(llm_error)}'}, ensure_ascii=False)}\n\n"
+                return
 
+    # Report which source was actually used
+    yield f"event: expert_source\ndata: {json.dumps({'source': actual_source}, ensure_ascii=False)}\n\n"
     yield f"event: expert_complete\ndata: {json.dumps({'experts': [e.model_dump() for e in experts]}, ensure_ascii=False)}\n\n"
 
     if not experts:
```
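The router above delegates to expert_source_service.get_experts, whose implementation is not part of this diff. For the Wikidata side, the commit message says the provider runs SPARQL queries for occupations with Chinese labels; a sketch against the public query endpoint follows. The choice of Q28640 ("profession") as the occupation class, the query shape, and the fetch_occupations helper are all assumptions:

```python
# Hypothetical sketch of a Wikidata SPARQL lookup for occupations with
# Chinese labels; the actual WikidataProvider query may differ.
import httpx

WIKIDATA_SPARQL = "https://query.wikidata.org/sparql"

# Instances of (subclasses of) "profession" (Q28640) is one plausible
# modelling choice; the commit does not specify the class actually used.
OCCUPATION_QUERY = """
SELECT ?item ?label WHERE {
  ?item wdt:P31/wdt:P279* wd:Q28640 .
  ?item rdfs:label ?label .
  FILTER(LANG(?label) = "zh")
}
LIMIT 200
"""


async def fetch_occupations() -> list[str]:
    async with httpx.AsyncClient(timeout=30) as client:
        resp = await client.get(
            WIKIDATA_SPARQL,
            params={"query": OCCUPATION_QUERY, "format": "json"},
            headers={"User-Agent": "expert-source-demo/0.1"},  # WDQS asks for a UA
        )
        resp.raise_for_status()
        bindings = resp.json()["results"]["bindings"]
        return [b["label"]["value"] for b in bindings]
```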
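For the ConceptNet side, a comparable sketch using the public ConceptNet 5 query API; filtering on the /r/IsA relation with /c/en/profession as the end node is an assumption about how "occupation-related concepts" are selected:

```python
# Hypothetical sketch of a ConceptNet lookup for occupation-related
# concepts; the actual ConceptNetProvider may query differently.
import httpx

CONCEPTNET_API = "https://api.conceptnet.io/query"


async def fetch_occupation_concepts(limit: int = 100) -> list[str]:
    async with httpx.AsyncClient(timeout=30) as client:
        resp = await client.get(
            CONCEPTNET_API,
            params={"rel": "/r/IsA", "end": "/c/en/profession", "limit": limit},
        )
        resp.raise_for_status()
        edges = resp.json().get("edges", [])
        # Each edge's start node is a concept asserted to be a profession.
        return [e["start"]["label"] for e in edges]
```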
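Putting the pieces together, the "random selection from cached pool" wording suggests get_experts fetches a pool once per TTL window and samples from it on each request. A sketch under those assumptions, reusing the sketches above, with the signature and return shape inferred from the call site in the router diff:

```python
# Hypothetical sketch of expert_source_service.get_experts, inferred from
# its call site in the router diff; the real module layout may differ.
import random

_cache = TTLCache(ttl_seconds=3600)  # TTLCache sketch from above


async def get_experts(source: str, count: int, language: str) -> tuple[list[dict], str]:
    """Return (experts_data, actual_source) as the router expects."""
    cache_key = f"{source}:{language}"
    pool = _cache.get(cache_key)
    if pool is None:
        # Cache miss: fetch a fresh pool from the external source.
        # (Language handling is elided here; the real providers presumably
        # parameterize the label language rather than hardcoding it.)
        if source == "wikidata":
            names = await fetch_occupations()           # sketch above
        elif source == "conceptnet":
            names = await fetch_occupation_concepts()   # sketch above
        else:
            raise ValueError(f"unknown expert source: {source}")
        pool = [{"name": n, "domain": n} for n in names]
        _cache.set(cache_key, pool)
    # Random selection from the cached pool, per the commit message.
    return random.sample(pool, min(count, len(pool))), source
```

Caching the whole pool and sampling per request means repeated requests inside the TTL window cost no network round-trips, which is consistent with both the cache's one-hour default and the LLM fallback path in the router when a fetch fails.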