diff --git a/backend/app/main.py b/backend/app/main.py index c8a100d..db3224b 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -3,7 +3,7 @@ from contextlib import asynccontextmanager from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware -from .routers import attributes +from .routers import attributes, transformation, expert_transformation from .services.llm_service import ollama_provider @@ -29,6 +29,8 @@ app.add_middleware( ) app.include_router(attributes.router) +app.include_router(transformation.router) +app.include_router(expert_transformation.router) @app.get("/") diff --git a/backend/app/models/schemas.py b/backend/app/models/schemas.py index 90a851f..2b7451c 100644 --- a/backend/app/models/schemas.py +++ b/backend/app/models/schemas.py @@ -131,3 +131,92 @@ class DAGRelationship(BaseModel): source: str # source attribute name target_category: str target: str # target attribute name + + +# ===== Transformation Agent schemas ===== + +class TransformationRequest(BaseModel): + """Transformation Agent 請求""" + query: str # 原始查詢 (e.g., "腳踏車") + category: str # 類別名稱 (e.g., "功能") + attributes: List[str] # 該類別的屬性列表 + model: Optional[str] = None + temperature: Optional[float] = 0.7 + keyword_count: int = 3 # 要生成的新關鍵字數量 + + +class TransformationDescription(BaseModel): + """單一轉換描述""" + keyword: str # 新關鍵字 + description: str # 與 query 結合的描述 + + +class TransformationCategoryResult(BaseModel): + """單一類別的轉換結果""" + category: str + original_attributes: List[str] # 原始屬性 + new_keywords: List[str] # 新生成的關鍵字 + descriptions: List[TransformationDescription] + + +class TransformationDAGResult(BaseModel): + """完整 Transformation 結果""" + query: str + results: List[TransformationCategoryResult] + + +# ===== Expert Transformation Agent schemas ===== + +class ExpertProfile(BaseModel): + """專家檔案""" + id: str # e.g., "expert-0" + name: str # e.g., "藥師" + domain: str # e.g., "醫療與健康" + perspective: Optional[str] = None # e.g., "從藥物與健康管理角度思考" + + +class ExpertKeyword(BaseModel): + """專家視角生成的關鍵字""" + keyword: str # 關鍵字本身 + expert_id: str # 哪個專家生成的 + expert_name: str # 專家名稱(冗餘,方便前端) + source_attribute: str # 來自哪個原始屬性 + + +class ExpertTransformationDescription(BaseModel): + """專家關鍵字的描述""" + keyword: str + expert_id: str + expert_name: str + description: str + + +class ExpertTransformationCategoryResult(BaseModel): + """單一類別的轉換結果(專家版)""" + category: str + original_attributes: List[str] + expert_keywords: List[ExpertKeyword] # 所有專家生成的關鍵字 + descriptions: List[ExpertTransformationDescription] + + +class ExpertTransformationDAGResult(BaseModel): + """完整轉換結果(專家版)""" + query: str + experts: List[ExpertProfile] # 使用的專家列表 + results: List[ExpertTransformationCategoryResult] + + +class ExpertTransformationRequest(BaseModel): + """Expert Transformation Agent 請求""" + query: str + category: str + attributes: List[str] + + # Expert parameters + expert_count: int = 3 # 專家數量 (2-8) + keywords_per_expert: int = 1 # 每個專家為每個屬性生成幾個關鍵字 (1-3) + custom_experts: Optional[List[str]] = None # 用戶指定專家 ["藥師", "工程師"] + + # LLM parameters + model: Optional[str] = None + temperature: Optional[float] = 0.7 diff --git a/backend/app/prompts/expert_transformation_prompt.py b/backend/app/prompts/expert_transformation_prompt.py new file mode 100644 index 0000000..6cc304f --- /dev/null +++ b/backend/app/prompts/expert_transformation_prompt.py @@ -0,0 +1,78 @@ +"""Expert Transformation Agent 提示詞模組""" + +from typing import List, Optional + + +def get_expert_generation_prompt( + query: str, + categories: List[str], + 
expert_count: int, + custom_experts: Optional[List[str]] = None +) -> str: + """Step 0: 生成專家團隊(不依賴主題,純隨機多元)""" + custom_text = "" + if custom_experts and len(custom_experts) > 0: + custom_text = f"(已指定:{', '.join(custom_experts[:expert_count])})" + + return f"""/no_think +隨機組建 {expert_count} 個來自完全不同領域的專家團隊{custom_text}。 + +回傳 JSON: +{{"experts": [{{"id": "expert-0", "name": "職業", "domain": "領域", "perspective": "角度"}}, ...]}} + +規則: +- id 為 expert-0 到 expert-{expert_count - 1} +- name 填寫職業名稱(非人名),2-5字 +- 各專家的 domain 必須來自截然不同的領域,越多元越好""" + + +def get_expert_keyword_generation_prompt( + category: str, + attribute: str, + experts: List[dict], # List[ExpertProfile] + keywords_per_expert: int = 1 +) -> str: + """Step 1: 專家視角關鍵字生成""" + experts_info = ", ".join([f"{exp['id']}:{exp['name']}({exp['domain']})" for exp in experts]) + + return f"""/no_think +專家團隊:{experts_info} +屬性:「{attribute}」({category}) + +每位專家從自己的專業視角為此屬性生成 {keywords_per_expert} 個創新關鍵字(2-6字)。 +關鍵字要反映該專家領域的獨特思考方式。 + +回傳 JSON: +{{"keywords": [{{"keyword": "詞彙", "expert_id": "expert-X", "expert_name": "名稱"}}, ...]}} + +共需 {len(experts) * keywords_per_expert} 個關鍵字。""" + + +def get_expert_batch_description_prompt( + query: str, + category: str, + expert_keywords: List[dict] # List[ExpertKeyword] +) -> str: + """Step 2: 批次生成專家關鍵字的描述""" + keywords_info = ", ".join([ + f"{kw['expert_name']}:{kw['keyword']}" + for kw in expert_keywords + ]) + + # 建立 keyword -> (expert_id, expert_name) 的對照 + keyword_expert_map = ", ".join([ + f"{kw['keyword']}→{kw['expert_id']}/{kw['expert_name']}" + for kw in expert_keywords + ]) + + return f"""/no_think +物件:「{query}」 +關鍵字(專家:詞彙):{keywords_info} +對照:{keyword_expert_map} + +為每個關鍵字生成創新描述(15-30字),說明如何將該概念應用到「{query}」上。 + +回傳 JSON: +{{"descriptions": [{{"keyword": "詞彙", "expert_id": "expert-X", "expert_name": "名稱", "description": "應用描述"}}, ...]}} + +共需 {len(expert_keywords)} 個描述。""" diff --git a/backend/app/prompts/transformation_prompt.py b/backend/app/prompts/transformation_prompt.py new file mode 100644 index 0000000..415456a --- /dev/null +++ b/backend/app/prompts/transformation_prompt.py @@ -0,0 +1,97 @@ +"""Transformation Agent 提示詞模組""" + +from typing import List + + +def get_keyword_generation_prompt( + category: str, + attributes: List[str], + keyword_count: int = 3 +) -> str: + """ + Step 1: 生成新關鍵字 + + 給定類別和現有屬性,生成全新的、有創意的關鍵字。 + 不考慮原始查詢,只專注於類別本身可能的延伸。 + """ + attrs_text = "、".join(attributes) + + return f"""/no_think +你是一個創意發想專家。給定一個類別和該類別下的現有屬性,請生成全新的、有創意的關鍵字或描述片段。 + +【類別】{category} +【現有屬性】{attrs_text} + +【重要規則】 +1. 生成 {keyword_count} 個全新的關鍵字 +2. 關鍵字必須符合「{category}」這個類別的範疇 +3. 關鍵字要有創意,不能與現有屬性重複或太相似 +4. 不要考慮任何特定物件,只專注於這個類別本身可能的延伸 +5. 
每個關鍵字應該是 2-6 個字的詞彙或短語 + +只回傳 JSON: +{{ + "keywords": ["關鍵字1", "關鍵字2", "關鍵字3"] +}}""" + + +def get_description_generation_prompt( + query: str, + category: str, + keyword: str +) -> str: + """ + Step 2: 結合原始查詢生成描述 + + 用新關鍵字創造一個與原始查詢相關的創新應用描述。 + """ + return f"""/no_think +你是一個創新應用專家。請將一個新的關鍵字概念應用到特定物件上,創造出創新的應用描述。 + +【物件】{query} +【類別】{category} +【新關鍵字】{keyword} + +【任務】 +請用「{keyword}」這個概念,為「{query}」創造一個創新的應用描述。 +描述應該是一個完整的句子或短語,說明如何將這個新概念應用到物件上。 + +【範例格式】 +- 如果物件是「腳踏車」,關鍵字是「監視」,可以生成「腳踏車監視騎乘者的身體健康狀況」 +- 如果物件是「雨傘」,關鍵字是「發電」,可以生成「雨傘利用雨滴撞擊發電」 + +只回傳 JSON: +{{ + "description": "創新應用描述" +}}""" + + +def get_batch_description_prompt( + query: str, + category: str, + keywords: List[str] +) -> str: + """ + 批次生成描述(可選的優化版本,一次處理多個關鍵字) + """ + keywords_text = "、".join(keywords) + keywords_json = ", ".join([f'"{k}"' for k in keywords]) + + return f"""/no_think +你是一個創新應用專家。請將多個新的關鍵字概念應用到特定物件上,為每個關鍵字創造創新的應用描述。 + +【物件】{query} +【類別】{category} +【新關鍵字】{keywords_text} + +【任務】 +為每個關鍵字創造一個與「{query}」相關的創新應用描述。 +每個描述應該是一個完整的句子或短語。 + +只回傳 JSON: +{{ + "descriptions": [ + {{"keyword": "關鍵字1", "description": "描述1"}}, + {{"keyword": "關鍵字2", "description": "描述2"}} + ] +}}""" diff --git a/backend/app/routers/expert_transformation.py b/backend/app/routers/expert_transformation.py new file mode 100644 index 0000000..8a0d737 --- /dev/null +++ b/backend/app/routers/expert_transformation.py @@ -0,0 +1,185 @@ +"""Expert Transformation Agent 路由模組""" + +import json +import logging +from typing import AsyncGenerator, List + +from fastapi import APIRouter +from fastapi.responses import StreamingResponse + +from ..models.schemas import ( + ExpertTransformationRequest, + ExpertProfile, + ExpertKeyword, + ExpertTransformationCategoryResult, + ExpertTransformationDescription, +) +from ..prompts.expert_transformation_prompt import ( + get_expert_generation_prompt, + get_expert_keyword_generation_prompt, + get_expert_batch_description_prompt, +) +from ..services.llm_service import ollama_provider, extract_json_from_response + +logger = logging.getLogger(__name__) +router = APIRouter(prefix="/api/expert-transformation", tags=["expert-transformation"]) + + +async def generate_expert_transformation_events( + request: ExpertTransformationRequest, + all_categories: List[str] # For expert generation context +) -> AsyncGenerator[str, None]: + """Generate SSE events for expert transformation process""" + try: + temperature = request.temperature if request.temperature is not None else 0.7 + model = request.model + + # ========== Step 0: Generate expert team ========== + yield f"event: expert_start\ndata: {json.dumps({'message': '正在組建專家團隊...'}, ensure_ascii=False)}\n\n" + + experts: List[ExpertProfile] = [] + + try: + expert_prompt = get_expert_generation_prompt( + query=request.query, + categories=all_categories, + expert_count=request.expert_count, + custom_experts=request.custom_experts + ) + logger.info(f"Expert prompt: {expert_prompt[:200]}") + + expert_response = await ollama_provider.generate( + expert_prompt, model=model, temperature=temperature + ) + logger.info(f"Expert response: {expert_response[:500]}") + + expert_data = extract_json_from_response(expert_response) + experts_raw = expert_data.get("experts", []) + + for exp in experts_raw: + if isinstance(exp, dict) and all(k in exp for k in ["id", "name", "domain"]): + experts.append(ExpertProfile(**exp)) + + except Exception as e: + logger.error(f"Failed to generate experts: {e}") + yield f"event: error\ndata: {json.dumps({'error': f'專家團隊生成失敗: {str(e)}'}, ensure_ascii=False)}\n\n" + 
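+            # Abort the stream here: every later step depends on a successfully generated expert team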
return + + yield f"event: expert_complete\ndata: {json.dumps({'experts': [e.model_dump() for e in experts]}, ensure_ascii=False)}\n\n" + + if not experts: + yield f"event: error\ndata: {json.dumps({'error': '無法生成專家團隊'}, ensure_ascii=False)}\n\n" + return + + # ========== Step 1: Generate keywords from expert perspectives ========== + yield f"event: keyword_start\ndata: {json.dumps({'message': f'專家團隊為「{request.category}」的屬性生成關鍵字...'}, ensure_ascii=False)}\n\n" + + all_expert_keywords: List[ExpertKeyword] = [] + + # For each attribute, ask all experts to generate keywords + for attr_index, attribute in enumerate(request.attributes): + try: + kw_prompt = get_expert_keyword_generation_prompt( + category=request.category, + attribute=attribute, + experts=[e.model_dump() for e in experts], + keywords_per_expert=request.keywords_per_expert + ) + logger.info(f"Keyword prompt for '{attribute}': {kw_prompt[:300]}") + + kw_response = await ollama_provider.generate( + kw_prompt, model=model, temperature=temperature + ) + logger.info(f"Keyword response for '{attribute}': {kw_response[:500]}") + + kw_data = extract_json_from_response(kw_response) + keywords_raw = kw_data.get("keywords", []) + + # Add source_attribute to each keyword + for kw in keywords_raw: + if isinstance(kw, dict) and all(k in kw for k in ["keyword", "expert_id", "expert_name"]): + all_expert_keywords.append(ExpertKeyword( + keyword=kw["keyword"], + expert_id=kw["expert_id"], + expert_name=kw["expert_name"], + source_attribute=attribute + )) + + # Emit progress + yield f"event: keyword_progress\ndata: {json.dumps({'attribute': attribute, 'count': len(keywords_raw)}, ensure_ascii=False)}\n\n" + + except Exception as e: + logger.warning(f"Failed to generate keywords for '{attribute}': {e}") + yield f"event: keyword_progress\ndata: {json.dumps({'attribute': attribute, 'count': 0, 'error': str(e)}, ensure_ascii=False)}\n\n" + # Continue with next attribute instead of stopping + + yield f"event: keyword_complete\ndata: {json.dumps({'total_keywords': len(all_expert_keywords)}, ensure_ascii=False)}\n\n" + + if not all_expert_keywords: + yield f"event: error\ndata: {json.dumps({'error': '無法生成關鍵字'}, ensure_ascii=False)}\n\n" + return + + # ========== Step 2: Generate descriptions for each expert keyword ========== + yield f"event: description_start\ndata: {json.dumps({'message': '為專家關鍵字生成創新應用描述...'}, ensure_ascii=False)}\n\n" + + descriptions: List[ExpertTransformationDescription] = [] + + try: + desc_prompt = get_expert_batch_description_prompt( + query=request.query, + category=request.category, + expert_keywords=[kw.model_dump() for kw in all_expert_keywords] + ) + logger.info(f"Description prompt: {desc_prompt[:300]}") + + desc_response = await ollama_provider.generate( + desc_prompt, model=model, temperature=temperature + ) + logger.info(f"Description response: {desc_response[:500]}") + + desc_data = extract_json_from_response(desc_response) + descriptions_raw = desc_data.get("descriptions", []) + + for desc in descriptions_raw: + if isinstance(desc, dict) and all(k in desc for k in ["keyword", "expert_id", "expert_name", "description"]): + descriptions.append(ExpertTransformationDescription(**desc)) + + except Exception as e: + logger.warning(f"Failed to generate descriptions: {e}") + # Continue without descriptions - at least we have keywords + + yield f"event: description_complete\ndata: {json.dumps({'count': len(descriptions)}, ensure_ascii=False)}\n\n" + + # ========== Build final result ========== + result = 
ExpertTransformationCategoryResult( + category=request.category, + original_attributes=request.attributes, + expert_keywords=all_expert_keywords, + descriptions=descriptions + ) + + final_data = { + "result": result.model_dump(), + "experts": [e.model_dump() for e in experts] + } + yield f"event: done\ndata: {json.dumps(final_data, ensure_ascii=False)}\n\n" + + except Exception as e: + logger.error(f"Expert transformation error: {e}", exc_info=True) + yield f"event: error\ndata: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n" + + +@router.post("/category") +async def expert_transform_category(request: ExpertTransformationRequest): + """處理單一類別的專家視角轉換""" + # Extract all categories from request (should be passed separately in production) + # For now, use just the single category + return StreamingResponse( + generate_expert_transformation_events(request, [request.category]), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + }, + ) diff --git a/backend/app/routers/transformation.py b/backend/app/routers/transformation.py new file mode 100644 index 0000000..2d12ad7 --- /dev/null +++ b/backend/app/routers/transformation.py @@ -0,0 +1,116 @@ +"""Transformation Agent 路由模組""" + +import json +import logging +from typing import AsyncGenerator, List + +from fastapi import APIRouter +from fastapi.responses import StreamingResponse + +from ..models.schemas import ( + TransformationRequest, + TransformationCategoryResult, + TransformationDescription, +) +from ..prompts.transformation_prompt import ( + get_keyword_generation_prompt, + get_batch_description_prompt, +) +from ..services.llm_service import ollama_provider, extract_json_from_response + +logger = logging.getLogger(__name__) +router = APIRouter(prefix="/api/transformation", tags=["transformation"]) + + +async def generate_transformation_events( + request: TransformationRequest +) -> AsyncGenerator[str, None]: + """Generate SSE events for transformation process""" + try: + temperature = request.temperature if request.temperature is not None else 0.7 + model = request.model + + # ========== Step 1: Generate new keywords ========== + yield f"event: keyword_start\ndata: {json.dumps({'message': f'為「{request.category}」生成新關鍵字...'}, ensure_ascii=False)}\n\n" + + keyword_prompt = get_keyword_generation_prompt( + category=request.category, + attributes=request.attributes, + keyword_count=request.keyword_count + ) + logger.info(f"Keyword prompt: {keyword_prompt[:200]}") + + keyword_response = await ollama_provider.generate( + keyword_prompt, model=model, temperature=temperature + ) + logger.info(f"Keyword response: {keyword_response[:500]}") + + keyword_data = extract_json_from_response(keyword_response) + new_keywords = keyword_data.get("keywords", []) + + yield f"event: keyword_complete\ndata: {json.dumps({'keywords': new_keywords}, ensure_ascii=False)}\n\n" + + if not new_keywords: + yield f"event: error\ndata: {json.dumps({'error': '無法生成新關鍵字'}, ensure_ascii=False)}\n\n" + return + + # ========== Step 2: Generate descriptions for each keyword ========== + yield f"event: description_start\ndata: {json.dumps({'message': '生成創新應用描述...'}, ensure_ascii=False)}\n\n" + + # Use batch description prompt for efficiency + desc_prompt = get_batch_description_prompt( + query=request.query, + category=request.category, + keywords=new_keywords + ) + logger.info(f"Description prompt: {desc_prompt[:300]}") + + desc_response = await ollama_provider.generate( + desc_prompt, 
model=model, temperature=temperature + ) + logger.info(f"Description response: {desc_response[:500]}") + + desc_data = extract_json_from_response(desc_response) + descriptions_raw = desc_data.get("descriptions", []) + + # Convert to TransformationDescription objects + descriptions: List[TransformationDescription] = [] + for desc in descriptions_raw: + if isinstance(desc, dict) and "keyword" in desc and "description" in desc: + descriptions.append(TransformationDescription( + keyword=desc["keyword"], + description=desc["description"] + )) + + yield f"event: description_complete\ndata: {json.dumps({'count': len(descriptions)}, ensure_ascii=False)}\n\n" + + # ========== Build final result ========== + result = TransformationCategoryResult( + category=request.category, + original_attributes=request.attributes, + new_keywords=new_keywords, + descriptions=descriptions + ) + + final_data = { + "result": result.model_dump() + } + yield f"event: done\ndata: {json.dumps(final_data, ensure_ascii=False)}\n\n" + + except Exception as e: + logger.error(f"Transformation error: {e}") + yield f"event: error\ndata: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n" + + +@router.post("/category") +async def transform_category(request: TransformationRequest): + """處理單一類別的轉換""" + return StreamingResponse( + generate_transformation_events(request), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + }, + ) diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index dba3547..9c28d1f 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,11 +1,15 @@ -import { useState, useRef, useCallback } from 'react'; -import { ConfigProvider, Layout, theme, Typography, Space } from 'antd'; -import { ApartmentOutlined } from '@ant-design/icons'; +import { useState, useRef, useCallback, useEffect } from 'react'; +import { ConfigProvider, Layout, theme, Typography, Space, Tabs } from 'antd'; +import { ApartmentOutlined, ThunderboltOutlined } from '@ant-design/icons'; import { ThemeToggle } from './components/ThemeToggle'; import { InputPanel } from './components/InputPanel'; +import { TransformationInputPanel } from './components/TransformationInputPanel'; import { MindmapPanel } from './components/MindmapPanel'; +import { TransformationPanel } from './components/TransformationPanel'; import { useAttribute } from './hooks/useAttribute'; +import { getModels } from './services/api'; import type { MindmapDAGRef } from './components/MindmapDAG'; +import type { TransformationDAGRef } from './components/TransformationDAG'; import type { CategoryMode } from './types'; const { Header, Sider, Content } = Layout; @@ -18,12 +22,51 @@ interface VisualSettings { function App() { const [isDark, setIsDark] = useState(true); + const [activeTab, setActiveTab] = useState('attribute'); const { loading, progress, error, currentResult, history, analyze, loadFromHistory } = useAttribute(); const [visualSettings, setVisualSettings] = useState({ nodeSpacing: 32, fontSize: 14, }); const mindmapRef = useRef(null); + const transformationRef = useRef(null); + + // Transformation Agent settings + const [transformModel, setTransformModel] = useState(''); + const [transformTemperature, setTransformTemperature] = useState(0.7); + const [expertConfig, setExpertConfig] = useState<{ + expert_count: number; + keywords_per_expert: number; + custom_experts?: string[]; + }>({ + expert_count: 3, + keywords_per_expert: 1, + custom_experts: undefined, + }); + const 
[customExpertsInput, setCustomExpertsInput] = useState(''); + const [shouldStartTransform, setShouldStartTransform] = useState(false); + const [transformLoading, setTransformLoading] = useState(false); + + // Available models from API + const [availableModels, setAvailableModels] = useState([]); + + // Fetch models on mount + useEffect(() => { + async function fetchModels() { + try { + const response = await getModels(); + setAvailableModels(response.models); + // Set default model for transformation if not set + if (response.models.length > 0 && !transformModel) { + const defaultModel = response.models.find((m) => m.includes('qwen3')) || response.models[0]; + setTransformModel(defaultModel); + } + } catch (err) { + console.error('Failed to fetch models:', err); + } + } + fetchModels(); + }, []); const handleAnalyze = async ( query: string, @@ -41,6 +84,10 @@ function App() { mindmapRef.current?.resetView(); }, []); + const handleTransform = useCallback(() => { + setShouldStartTransform(true); + }, []); + return ( - Attribute Agent + Novelty Seeking @@ -95,13 +142,58 @@ function App() { overflow: 'hidden', }} > - + + Attribute Agent + + ), + children: ( +
+ +
+ ), + }, + { + key: 'transformation', + label: ( + + + Transformation Agent + + ), + children: ( +
+ setShouldStartTransform(false)} + onLoadingChange={setTransformLoading} + /> +
+ ), + }, + ]} /> - + {activeTab === 'attribute' ? ( + + ) : ( + + )} diff --git a/frontend/src/components/TransformationDAG.tsx b/frontend/src/components/TransformationDAG.tsx new file mode 100644 index 0000000..161788a --- /dev/null +++ b/frontend/src/components/TransformationDAG.tsx @@ -0,0 +1,106 @@ +import { forwardRef, useImperativeHandle } from 'react'; +import { + ReactFlow, + useReactFlow, + ReactFlowProvider, + Background, +} from '@xyflow/react'; +import '@xyflow/react/dist/style.css'; + +import type { + TransformationDAGResult, + ExpertTransformationDAGResult, + CategoryDefinition +} from '../types'; +import { + transformationNodeTypes, + useTransformationLayout, + useExpertTransformationLayout +} from './transformation'; +import '../styles/mindmap.css'; + +interface TransformationDAGProps { + data: TransformationDAGResult | ExpertTransformationDAGResult; + categories: CategoryDefinition[]; + isDark: boolean; +} + +export interface TransformationDAGRef { + resetView: () => void; +} + +const TransformationDAGInner = forwardRef( + ({ data, categories, isDark }, ref) => { + const { setViewport } = useReactFlow(); + + // Check if data is ExpertTransformationDAGResult by checking for 'experts' property + const isExpertTransformation = 'experts' in data; + + // Use appropriate layout hook based on data type + const regularLayout = useTransformationLayout( + !isExpertTransformation ? (data as TransformationDAGResult) : null, + categories, + { isDark, fontSize: 13 } + ); + + const expertLayout = useExpertTransformationLayout( + isExpertTransformation ? (data as ExpertTransformationDAGResult) : null, + categories, + { isDark, fontSize: 13 } + ); + + const { nodes, edges } = isExpertTransformation ? expertLayout : regularLayout; + + useImperativeHandle( + ref, + () => ({ + resetView: () => { + setViewport({ x: 50, y: 50, zoom: 1 }, { duration: 300 }); + }, + }), + [setViewport] + ); + + return ( + + + + ); + } +); + +TransformationDAGInner.displayName = 'TransformationDAGInner'; + +export const TransformationDAG = forwardRef( + (props, ref) => ( +
+    <ReactFlowProvider>
+      <TransformationDAGInner {...props} ref={ref} />
+    </ReactFlowProvider>
+ ) +); + +TransformationDAG.displayName = 'TransformationDAG'; diff --git a/frontend/src/components/TransformationInputPanel.tsx b/frontend/src/components/TransformationInputPanel.tsx new file mode 100644 index 0000000..a057b73 --- /dev/null +++ b/frontend/src/components/TransformationInputPanel.tsx @@ -0,0 +1,156 @@ +import { Card, Select, Slider, Typography, Space, Button, Divider } from 'antd'; +import { ThunderboltOutlined } from '@ant-design/icons'; +import { ExpertConfigPanel } from './transformation'; + +const { Title, Text } = Typography; + +interface TransformationInputPanelProps { + onTransform: () => void; + loading: boolean; + hasData: boolean; + isDark: boolean; + model: string; + temperature: number; + expertConfig: { + expert_count: number; + keywords_per_expert: number; + custom_experts?: string[]; + }; + customExpertsInput: string; + onModelChange: (model: string) => void; + onTemperatureChange: (temperature: number) => void; + onExpertConfigChange: (config: { + expert_count: number; + keywords_per_expert: number; + custom_experts?: string[]; + }) => void; + onCustomExpertsInputChange: (value: string) => void; + availableModels: string[]; +} + +export const TransformationInputPanel: React.FC = ({ + onTransform, + loading, + hasData, + isDark, + model, + temperature, + expertConfig, + customExpertsInput, + onModelChange, + onTemperatureChange, + onExpertConfigChange, + onCustomExpertsInputChange, + availableModels, +}) => { + return ( +
+ + {/* Header */} +
+ + <ThunderboltOutlined style={{ marginRight: 8 }} /> + Transformation Agent + + + 使用專家視角生成創新關鍵字 + +
+ + + + {/* LLM Settings */} + + +
+ 模型 + + + 手動指定專家類型,系統會優先使用並自動補充不足的專家 + + +
+ + + +
+ +
+ + 預計生成 +
+
+ + 每個屬性將生成{' '} + + {expectedKeywordsPerAttribute} + {' '} + 個關鍵字 + +
+ + ({expertCount} 位專家 × {keywordsPerExpert} 個/專家) + +
+
+
+ + {expectedKeywordsPerAttribute > 10 && ( +
+ + ⚠️ 關鍵字數量較多,可能需要較長處理時間 + +
+ )} +
+
+ ); +}; diff --git a/frontend/src/components/transformation/index.ts b/frontend/src/components/transformation/index.ts new file mode 100644 index 0000000..4cb91c5 --- /dev/null +++ b/frontend/src/components/transformation/index.ts @@ -0,0 +1,29 @@ +import { KeywordNode } from './nodes/KeywordNode'; +import { ExpertKeywordNode } from './nodes/ExpertKeywordNode'; +import { DescriptionNode } from './nodes/DescriptionNode'; +import { CategoryNode } from './nodes/CategoryNode'; +import { OriginalAttributeNode } from './nodes/OriginalAttributeNode'; +import { DividerNode } from './nodes/DividerNode'; +import { QueryNode } from '../dag/nodes/QueryNode'; + +export const transformationNodeTypes = { + query: QueryNode, + category: CategoryNode, + keyword: KeywordNode, + expertKeyword: ExpertKeywordNode, + description: DescriptionNode, + originalAttribute: OriginalAttributeNode, + divider: DividerNode, +}; + +export { + KeywordNode, + ExpertKeywordNode, + DescriptionNode, + CategoryNode, + OriginalAttributeNode, + DividerNode +}; +export { useTransformationLayout } from './useTransformationLayout'; +export { useExpertTransformationLayout } from './useExpertTransformationLayout'; +export { ExpertConfigPanel } from './ExpertConfigPanel'; diff --git a/frontend/src/components/transformation/nodes/CategoryNode.tsx b/frontend/src/components/transformation/nodes/CategoryNode.tsx new file mode 100644 index 0000000..92eeba7 --- /dev/null +++ b/frontend/src/components/transformation/nodes/CategoryNode.tsx @@ -0,0 +1,38 @@ +import { memo } from 'react'; + +interface CategoryNodeProps { + data: { + label: string; + color: string; + attributeCount: number; + isDark: boolean; + }; +} + +export const CategoryNode = memo(({ data }: CategoryNodeProps) => { + const { label, color, isDark } = data; + + return ( +
+ {label} +
+ ); +}); + +CategoryNode.displayName = 'CategoryNode'; diff --git a/frontend/src/components/transformation/nodes/DescriptionNode.tsx b/frontend/src/components/transformation/nodes/DescriptionNode.tsx new file mode 100644 index 0000000..3d82903 --- /dev/null +++ b/frontend/src/components/transformation/nodes/DescriptionNode.tsx @@ -0,0 +1,84 @@ +import { memo, useState } from 'react'; + +interface DescriptionNodeProps { + data: { + keyword: string; + description: string; + color: string; + isDark: boolean; + }; +} + +export const DescriptionNode = memo(({ data }: DescriptionNodeProps) => { + const { keyword, description, isDark } = data; + const [isHovered, setIsHovered] = useState(false); + + return ( +
setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + style={{ + padding: '10px 14px', + borderRadius: 8, + background: isDark + ? 'linear-gradient(135deg, rgba(82, 196, 26, 0.2) 0%, rgba(82, 196, 26, 0.1) 100%)' + : 'linear-gradient(135deg, rgba(82, 196, 26, 0.15) 0%, rgba(82, 196, 26, 0.05) 100%)', + border: `2px solid ${isHovered ? '#52c41a' : '#52c41a80'}`, + color: isDark ? '#fff' : '#333', + fontSize: '12px', + width: 400, + minHeight: 50, + cursor: 'pointer', + transition: 'all 0.2s ease', + userSelect: 'none', + boxShadow: isHovered + ? `0 4px 12px ${isDark ? 'rgba(82, 196, 26, 0.4)' : 'rgba(82, 196, 26, 0.25)'}` + : `0 2px 6px ${isDark ? 'rgba(0, 0, 0, 0.3)' : 'rgba(0, 0, 0, 0.1)'}`, + }} + > +
+ + 創新應用 + + + {keyword} + +
+
+ {description} +
+
+ ); +}); + +DescriptionNode.displayName = 'DescriptionNode'; diff --git a/frontend/src/components/transformation/nodes/DividerNode.tsx b/frontend/src/components/transformation/nodes/DividerNode.tsx new file mode 100644 index 0000000..e1a09f0 --- /dev/null +++ b/frontend/src/components/transformation/nodes/DividerNode.tsx @@ -0,0 +1,26 @@ +import { memo } from 'react'; + +interface DividerNodeProps { + data: { + isDark: boolean; + }; +} + +export const DividerNode = memo(({ data }: DividerNodeProps) => { + const { isDark } = data; + + return ( +
+ ); +}); + +DividerNode.displayName = 'DividerNode'; diff --git a/frontend/src/components/transformation/nodes/ExpertKeywordNode.tsx b/frontend/src/components/transformation/nodes/ExpertKeywordNode.tsx new file mode 100644 index 0000000..21401e6 --- /dev/null +++ b/frontend/src/components/transformation/nodes/ExpertKeywordNode.tsx @@ -0,0 +1,92 @@ +import { memo, useState } from 'react'; + +interface ExpertKeywordNodeProps { + data: { + label: string; + expertName: string; + expertId: string; + color: string; + isDark: boolean; + }; +} + +export const ExpertKeywordNode = memo(({ data }: ExpertKeywordNodeProps) => { + const { label, expertName, color, isDark } = data; + const [isHovered, setIsHovered] = useState(false); + + return ( +
setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + style={{ + position: 'relative', + display: 'flex', + flexDirection: 'column', + gap: 4, + padding: '6px 12px', + paddingRight: '36px', + marginTop: 18, + borderRadius: 6, + background: isDark + ? 'linear-gradient(135deg, rgba(24, 144, 255, 0.15) 0%, rgba(255, 255, 255, 0.1) 100%)' + : 'linear-gradient(135deg, rgba(24, 144, 255, 0.1) 0%, rgba(0, 0, 0, 0.03) 100%)', + border: `2px solid ${color}`, + borderWidth: isHovered ? 3 : 2, + color: isDark ? '#fff' : '#333', + fontSize: '13px', + fontWeight: 500, + textAlign: 'center', + cursor: 'pointer', + transition: 'all 0.2s ease', + whiteSpace: 'nowrap', + userSelect: 'none', + filter: isHovered ? 'brightness(1.1)' : 'none', + boxShadow: isDark + ? '0 2px 8px rgba(24, 144, 255, 0.2)' + : '0 2px 8px rgba(24, 144, 255, 0.15)', + }} + > + {/* Expert Badge - positioned above the node */} +
+ {expertName} +
+ + {/* Keyword Label */} +
{label}
+ + {/* NEW Badge - positioned below the node */} + + NEW + +
+ ); +}); + +ExpertKeywordNode.displayName = 'ExpertKeywordNode'; diff --git a/frontend/src/components/transformation/nodes/KeywordNode.tsx b/frontend/src/components/transformation/nodes/KeywordNode.tsx new file mode 100644 index 0000000..da1ec3d --- /dev/null +++ b/frontend/src/components/transformation/nodes/KeywordNode.tsx @@ -0,0 +1,65 @@ +import { memo, useState } from 'react'; + +interface KeywordNodeProps { + data: { + label: string; + color: string; + isDark: boolean; + }; +} + +export const KeywordNode = memo(({ data }: KeywordNodeProps) => { + const { label, color, isDark } = data; + const [isHovered, setIsHovered] = useState(false); + + return ( +
setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + style={{ + position: 'relative', + padding: '6px 12px', + paddingRight: '36px', + borderRadius: 6, + background: isDark + ? 'linear-gradient(135deg, rgba(250, 173, 20, 0.15) 0%, rgba(255, 255, 255, 0.1) 100%)' + : 'linear-gradient(135deg, rgba(250, 173, 20, 0.1) 0%, rgba(0, 0, 0, 0.03) 100%)', + border: `2px solid ${color}`, + borderWidth: isHovered ? 3 : 2, + color: isDark ? '#fff' : '#333', + fontSize: '13px', + fontWeight: 500, + textAlign: 'center', + cursor: 'pointer', + transition: 'all 0.2s ease', + whiteSpace: 'nowrap', + userSelect: 'none', + filter: isHovered ? 'brightness(1.1)' : 'none', + boxShadow: isDark + ? '0 2px 8px rgba(250, 173, 20, 0.2)' + : '0 2px 8px rgba(250, 173, 20, 0.15)', + }} + > + {label} + + NEW + +
+ ); +}); + +KeywordNode.displayName = 'KeywordNode'; diff --git a/frontend/src/components/transformation/nodes/OriginalAttributeNode.tsx b/frontend/src/components/transformation/nodes/OriginalAttributeNode.tsx new file mode 100644 index 0000000..d0a2f3d --- /dev/null +++ b/frontend/src/components/transformation/nodes/OriginalAttributeNode.tsx @@ -0,0 +1,39 @@ +import { memo } from 'react'; + +interface OriginalAttributeNodeProps { + data: { + label: string; + color: string; + isDark: boolean; + }; +} + +export const OriginalAttributeNode = memo(({ data }: OriginalAttributeNodeProps) => { + const { label, color, isDark } = data; + + return ( +
+ {label} +
+ ); +}); + +OriginalAttributeNode.displayName = 'OriginalAttributeNode'; diff --git a/frontend/src/components/transformation/useExpertTransformationLayout.ts b/frontend/src/components/transformation/useExpertTransformationLayout.ts new file mode 100644 index 0000000..8627551 --- /dev/null +++ b/frontend/src/components/transformation/useExpertTransformationLayout.ts @@ -0,0 +1,332 @@ +import { useMemo } from 'react'; +import type { Node, Edge } from '@xyflow/react'; +import type { ExpertTransformationDAGResult, CategoryDefinition } from '../../types'; + +interface LayoutConfig { + isDark: boolean; + fontSize?: number; +} + +const COLOR_PALETTE = [ + { dark: '#177ddc', light: '#1890ff' }, // blue + { dark: '#854eca', light: '#722ed1' }, // purple + { dark: '#13a8a8', light: '#13c2c2' }, // cyan + { dark: '#d87a16', light: '#fa8c16' }, // orange + { dark: '#49aa19', light: '#52c41a' }, // green + { dark: '#1677ff', light: '#1890ff' }, // blue + { dark: '#eb2f96', light: '#f759ab' }, // magenta + { dark: '#faad14', light: '#ffc53d' }, // gold +]; + +// Estimate description card height based on text length +function estimateDescriptionHeight(description: string): number { + const cardWidth = 400; + const padding = 24; + const headerHeight = 32; + const charPerLine = Math.floor(cardWidth / 14); + const lineHeight = 20; + const lines = Math.ceil(description.length / charPerLine); + return Math.min(padding + headerHeight + lines * lineHeight, 140); +} + +export function useExpertTransformationLayout( + data: ExpertTransformationDAGResult | null, + categories: CategoryDefinition[], + config: LayoutConfig +): { nodes: Node[]; edges: Edge[] } { + return useMemo(() => { + if (!data || data.results.length === 0) { + return { nodes: [], edges: [] }; + } + + const { isDark, fontSize = 13 } = config; + const nodes: Node[] = []; + const edges: Edge[] = []; + + // Layout constants + const colStep = 140; + const categoryRowGap = 120; + const minItemGap = 12; + const expertKeywordGap = 24; // gap between expert keywords + + const queryX = 0; + const categoryX = colStep; + const originalAttrX = colStep * 2; + const keywordX = colStep * 3.2; + const descriptionX = colStep * 4.8; + + // Build category color map + const categoryColors: Record = {}; + categories.forEach((cat, index) => { + const paletteIndex = index % COLOR_PALETTE.length; + categoryColors[cat.name] = isDark + ? 
COLOR_PALETTE[paletteIndex].dark + : COLOR_PALETTE[paletteIndex].light; + }); + + // Pre-calculate layouts for each category + interface CategoryLayout { + attributeGroups: Array<{ + attribute: string; + expertKeywords: Array<{ + keyword: string; + expertName: string; + expertId: string; + yOffset: number; + }>; + descriptionYPositions: number[]; + totalHeight: number; + }>; + totalHeight: number; + } + + const categoryLayouts: CategoryLayout[] = data.results.map((result) => { + // Group expert keywords by source_attribute + const attributeMap = new Map(); + result.expert_keywords.forEach((kw) => { + if (!attributeMap.has(kw.source_attribute)) { + attributeMap.set(kw.source_attribute, []); + } + attributeMap.get(kw.source_attribute)!.push(kw); + }); + + const attributeGroups: CategoryLayout['attributeGroups'] = []; + let categoryTotalHeight = 0; + + // Process each source attribute group + result.original_attributes.forEach((attr) => { + const expertKeywords = attributeMap.get(attr) || []; + const keywordPositions: typeof attributeGroups[0]['expertKeywords'] = []; + const descYPositions: number[] = []; + let currentY = 0; + + expertKeywords.forEach((kw) => { + keywordPositions.push({ + keyword: kw.keyword, + expertName: kw.expert_name, + expertId: kw.expert_id, + yOffset: currentY, + }); + + // Find matching description + const matchingDesc = result.descriptions.find( + (d) => d.keyword === kw.keyword && d.expert_id === kw.expert_id + ); + const descHeight = matchingDesc + ? estimateDescriptionHeight(matchingDesc.description) + : 50; + + descYPositions.push(currentY); + currentY += descHeight + expertKeywordGap; + }); + + const groupHeight = currentY > 0 ? currentY - expertKeywordGap + minItemGap : 0; + + attributeGroups.push({ + attribute: attr, + expertKeywords: keywordPositions, + descriptionYPositions: descYPositions, + totalHeight: groupHeight, + }); + + categoryTotalHeight += groupHeight; + }); + + return { attributeGroups, totalHeight: categoryTotalHeight }; + }); + + // Calculate total height for query centering + const totalHeight = categoryLayouts.reduce( + (sum, layout, i) => + sum + layout.totalHeight + (i < categoryLayouts.length - 1 ? 
categoryRowGap : 0), + 0 + ); + + // Add Query node (centered vertically) + const queryY = totalHeight / 2 - 20; + nodes.push({ + id: 'query-node', + type: 'query', + position: { x: queryX, y: queryY }, + data: { + label: data.query, + isDark, + fontSize, + }, + draggable: false, + selectable: false, + }); + + // Track current Y position + let currentY = 0; + + // Process each category result + data.results.forEach((result, catIndex) => { + const categoryId = `category-${catIndex}`; + const color = categoryColors[result.category] || '#666'; + const layout = categoryLayouts[catIndex]; + + // Category Y position (centered within its group) + const categoryY = currentY + layout.totalHeight / 2 - 20; + + // Add category node + nodes.push({ + id: categoryId, + type: 'category', + position: { x: categoryX, y: categoryY }, + data: { + label: result.category, + color, + attributeCount: result.original_attributes.length, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from query to category + edges.push({ + id: `edge-query-${categoryId}`, + source: 'query-node', + target: categoryId, + type: 'smoothstep', + style: { stroke: color, strokeWidth: 2, opacity: 0.6 }, + animated: false, + }); + + // Process each attribute group + layout.attributeGroups.forEach((group, attrIndex) => { + const attrId = `orig-${catIndex}-${attrIndex}`; + const attrY = currentY + group.totalHeight / 2 - 12; + + // Add original attribute node + nodes.push({ + id: attrId, + type: 'originalAttribute', + position: { x: originalAttrX, y: attrY }, + data: { + label: group.attribute, + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from category to original attribute + edges.push({ + id: `edge-${categoryId}-${attrId}`, + source: categoryId, + target: attrId, + type: 'smoothstep', + style: { stroke: color, strokeWidth: 1, opacity: 0.3 }, + animated: false, + }); + + // Add expert keyword and description nodes + group.expertKeywords.forEach((kw, kwIndex) => { + const keywordId = `keyword-${catIndex}-${attrIndex}-${kwIndex}`; + const keywordY = currentY + kw.yOffset; + + // Expert keyword node + nodes.push({ + id: keywordId, + type: 'expertKeyword', + position: { x: keywordX, y: keywordY }, + data: { + label: kw.keyword, + expertName: kw.expertName, + expertId: kw.expertId, + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from original attribute to expert keyword + edges.push({ + id: `edge-${attrId}-${keywordId}`, + source: attrId, + target: keywordId, + type: 'smoothstep', + style: { + stroke: '#1890ff', + strokeWidth: 2, + opacity: 0.7, + strokeDasharray: '5,5', + }, + animated: true, + }); + + // Find matching description - match by expert_id and source_attribute + // (more lenient than exact keyword match since LLM may return slightly different text) + const matchingDesc = result.descriptions.find( + (d) => d.expert_id === kw.expertId && + (d.keyword === kw.keyword || + d.keyword.includes(kw.keyword) || + kw.keyword.includes(d.keyword)) + ); + + // Always show a description node (use fallback if not found) + const descId = `desc-${catIndex}-${attrIndex}-${kwIndex}`; + const descY = currentY + group.descriptionYPositions[kwIndex]; + + nodes.push({ + id: descId, + type: 'description', + position: { x: descriptionX, y: descY }, + data: { + keyword: matchingDesc?.keyword || kw.keyword, + description: matchingDesc?.description || '(描述生成中...)', + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from expert 
keyword to description (always show) + edges.push({ + id: `edge-${keywordId}-${descId}`, + source: keywordId, + target: descId, + type: 'smoothstep', + style: { + stroke: matchingDesc ? '#52c41a' : '#999', + strokeWidth: 2, + opacity: matchingDesc ? 0.6 : 0.3, + strokeDasharray: matchingDesc ? undefined : '4,4', + }, + animated: !matchingDesc, + }); + }); + + currentY += group.totalHeight; + }); + + // Add divider line between categories (except after last one) + if (catIndex < data.results.length - 1) { + const dividerY = currentY + categoryRowGap / 2 - 1; + nodes.push({ + id: `divider-${catIndex}`, + type: 'divider', + position: { x: queryX, y: dividerY }, + data: { + isDark, + }, + draggable: false, + selectable: false, + style: { + width: descriptionX + 400, + zIndex: -1, + }, + }); + + currentY += categoryRowGap; + } + }); + + return { nodes, edges }; + }, [data, categories, config]); +} diff --git a/frontend/src/components/transformation/useTransformationLayout.ts b/frontend/src/components/transformation/useTransformationLayout.ts new file mode 100644 index 0000000..64b95f1 --- /dev/null +++ b/frontend/src/components/transformation/useTransformationLayout.ts @@ -0,0 +1,294 @@ +import { useMemo } from 'react'; +import type { Node, Edge } from '@xyflow/react'; +import type { TransformationDAGResult, CategoryDefinition } from '../../types'; + +interface LayoutConfig { + isDark: boolean; + fontSize?: number; +} + +const COLOR_PALETTE = [ + { dark: '#177ddc', light: '#1890ff' }, // blue + { dark: '#854eca', light: '#722ed1' }, // purple + { dark: '#13a8a8', light: '#13c2c2' }, // cyan + { dark: '#d87a16', light: '#fa8c16' }, // orange + { dark: '#49aa19', light: '#52c41a' }, // green + { dark: '#1677ff', light: '#1890ff' }, // blue + { dark: '#eb2f96', light: '#f759ab' }, // magenta + { dark: '#faad14', light: '#ffc53d' }, // gold +]; + +// Estimate description card height based on text length +function estimateDescriptionHeight(description: string): number { + const cardWidth = 320; // increased card width + const padding = 20; // top + bottom padding + const headerHeight = 30; // header section with keyword + const charPerLine = Math.floor(cardWidth / 13); // ~13px per Chinese char + const lineHeight = 18; // line height in px + const lines = Math.ceil(description.length / charPerLine); + // Cap at reasonable height to prevent huge gaps + return Math.min(padding + headerHeight + lines * lineHeight, 120); +} + +export function useTransformationLayout( + data: TransformationDAGResult | null, + categories: CategoryDefinition[], + config: LayoutConfig +): { nodes: Node[]; edges: Edge[] } { + return useMemo(() => { + if (!data || data.results.length === 0) { + return { nodes: [], edges: [] }; + } + + const { isDark, fontSize = 13 } = config; + const nodes: Node[] = []; + const edges: Edge[] = []; + + // Layout constants + const colStep = 140; + const categoryRowGap = 120; // large gap between different categories + const minItemGap = 12; // minimum gap between transformation items + const origAttrRowStep = 36; // step for original attributes (same visual rhythm) + + const queryX = 0; + const categoryX = colStep; + const originalAttrX = colStep * 2; + const keywordX = colStep * 3.2; // increased gap from original attributes + const descriptionX = colStep * 4.8; // moved further right for wider cards + + // Build category color map + const categoryColors: Record = {}; + categories.forEach((cat, index) => { + const paletteIndex = index % COLOR_PALETTE.length; + categoryColors[cat.name] = isDark 
+ ? COLOR_PALETTE[paletteIndex].dark + : COLOR_PALETTE[paletteIndex].light; + }); + + // Pre-calculate all description heights and keyword Y positions for each category + interface CategoryLayout { + keywordYPositions: number[]; + origAttrYPositions: number[]; + totalHeight: number; + } + + const categoryLayouts: CategoryLayout[] = data.results.map((result) => { + // Calculate keyword/description Y positions based on description heights + const keywordYPositions: number[] = []; + let currentKeywordY = 0; + + result.new_keywords.forEach((keyword) => { + keywordYPositions.push(currentKeywordY); + + // Find matching description to calculate height + const matchingDesc = result.descriptions.find((d) => d.keyword === keyword); + const descHeight = matchingDesc + ? estimateDescriptionHeight(matchingDesc.description) + : 50; + + // Next keyword starts after this description + currentKeywordY += descHeight + minItemGap; + }); + + const keywordTotalHeight = currentKeywordY > 0 ? currentKeywordY - minItemGap : 0; + + // Calculate original attribute positions to match the same total height + const origCount = result.original_attributes.length; + const origAttrYPositions: number[] = []; + + if (origCount > 0) { + // Distribute original attributes evenly across the same height as keywords + const effectiveHeight = Math.max(keywordTotalHeight, origCount * origAttrRowStep); + const origStep = origCount > 1 ? effectiveHeight / (origCount - 1) : 0; + + for (let i = 0; i < origCount; i++) { + origAttrYPositions.push(i * (origCount > 1 ? origStep : 0)); + } + } + + const totalHeight = Math.max(keywordTotalHeight, origCount * origAttrRowStep); + + return { keywordYPositions, origAttrYPositions, totalHeight }; + }); + + // Calculate total height for query centering + const totalHeight = categoryLayouts.reduce( + (sum, layout, i) => + sum + layout.totalHeight + (i < categoryLayouts.length - 1 ? 
categoryRowGap : 0), + 0 + ); + + // Add Query node (centered vertically) + const queryY = totalHeight / 2 - 20; + nodes.push({ + id: 'query-node', + type: 'query', + position: { x: queryX, y: queryY }, + data: { + label: data.query, + isDark, + fontSize, + }, + draggable: false, + selectable: false, + }); + + // Track current Y position + let currentY = 0; + + // Process each category result + data.results.forEach((result, catIndex) => { + const categoryId = `category-${catIndex}`; + const color = categoryColors[result.category] || '#666'; + const layout = categoryLayouts[catIndex]; + + // Category Y position (centered within its group) + const categoryY = currentY + layout.totalHeight / 2 - 20; + + // Add category node + nodes.push({ + id: categoryId, + type: 'category', + position: { x: categoryX, y: categoryY }, + data: { + label: result.category, + color, + attributeCount: result.original_attributes.length, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from query to category + edges.push({ + id: `edge-query-${categoryId}`, + source: 'query-node', + target: categoryId, + type: 'smoothstep', + style: { stroke: color, strokeWidth: 2, opacity: 0.6 }, + animated: false, + }); + + // Add original attribute nodes (distributed to match keyword spacing) + result.original_attributes.forEach((attr, attrIndex) => { + const attrId = `orig-${catIndex}-${attrIndex}`; + const attrY = currentY + (layout.origAttrYPositions[attrIndex] || 0); + + nodes.push({ + id: attrId, + type: 'originalAttribute', + position: { x: originalAttrX, y: attrY }, + data: { + label: attr, + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from category to original attribute + edges.push({ + id: `edge-${categoryId}-${attrId}`, + source: categoryId, + target: attrId, + type: 'smoothstep', + style: { stroke: color, strokeWidth: 1, opacity: 0.3 }, + animated: false, + }); + }); + + // Add keyword and description nodes with smart Y positions + result.new_keywords.forEach((keyword, kwIndex) => { + const keywordId = `keyword-${catIndex}-${kwIndex}`; + const keywordY = currentY + layout.keywordYPositions[kwIndex]; + + // Keyword node + nodes.push({ + id: keywordId, + type: 'keyword', + position: { x: keywordX, y: keywordY }, + data: { + label: keyword, + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from category to keyword + edges.push({ + id: `edge-${categoryId}-${keywordId}`, + source: categoryId, + target: keywordId, + type: 'smoothstep', + style: { + stroke: '#faad14', + strokeWidth: 2, + opacity: 0.7, + strokeDasharray: '5,5', + }, + animated: true, + }); + + // Find matching description + const matchingDesc = result.descriptions.find((d) => d.keyword === keyword); + if (matchingDesc) { + const descId = `desc-${catIndex}-${kwIndex}`; + + nodes.push({ + id: descId, + type: 'description', + position: { x: descriptionX, y: keywordY }, + data: { + keyword: matchingDesc.keyword, + description: matchingDesc.description, + color, + isDark, + }, + draggable: false, + selectable: false, + }); + + // Edge from keyword to description + edges.push({ + id: `edge-${keywordId}-${descId}`, + source: keywordId, + target: descId, + type: 'smoothstep', + style: { stroke: '#52c41a', strokeWidth: 2, opacity: 0.6 }, + animated: false, + }); + } + }); + + // Move Y position for next category + currentY += layout.totalHeight; + + // Add divider line between categories (except after last one) + if (catIndex < data.results.length - 1) { + const dividerY = 
currentY + categoryRowGap / 2 - 1; + nodes.push({ + id: `divider-${catIndex}`, + type: 'divider', + position: { x: queryX, y: dividerY }, + data: { + isDark, + }, + draggable: false, + selectable: false, + style: { + width: descriptionX + 400, + zIndex: -1, + }, + }); + } + + currentY += categoryRowGap; + }); + + return { nodes, edges }; + }, [data, categories, config]); +} diff --git a/frontend/src/hooks/useExpertTransformation.ts b/frontend/src/hooks/useExpertTransformation.ts new file mode 100644 index 0000000..0b0e4f5 --- /dev/null +++ b/frontend/src/hooks/useExpertTransformation.ts @@ -0,0 +1,238 @@ +import { useState, useCallback } from 'react'; +import { expertTransformCategoryStream } from '../services/api'; +import type { + ExpertTransformationInput, + ExpertTransformationProgress, + ExpertTransformationCategoryResult, + ExpertTransformationDAGResult, + ExpertProfile, + CategoryDefinition, +} from '../types'; + +interface UseExpertTransformationOptions { + model?: string; + temperature?: number; +} + +export function useExpertTransformation(options: UseExpertTransformationOptions = {}) { + const [loading, setLoading] = useState(false); + const [progress, setProgress] = useState({ + step: 'idle', + currentCategory: '', + processedCategories: [], + message: '', + }); + const [results, setResults] = useState(null); + const [error, setError] = useState(null); + + // Global expert team - generated once and shared across all categories + const [experts, setExperts] = useState(null); + + const transformCategory = useCallback( + async ( + query: string, + category: CategoryDefinition, + attributes: string[], + expertConfig: { + expert_count: number; + keywords_per_expert: number; + custom_experts?: string[]; + } + ): Promise<{ + result: ExpertTransformationCategoryResult | null; + experts: ExpertProfile[]; + }> => { + return new Promise((resolve) => { + let categoryExperts: ExpertProfile[] = []; + + setProgress((prev) => ({ + ...prev, + step: 'expert', + currentCategory: category.name, + message: `組建專家團隊...`, + })); + + expertTransformCategoryStream( + { + query, + category: category.name, + attributes, + expert_count: expertConfig.expert_count, + keywords_per_expert: expertConfig.keywords_per_expert, + custom_experts: expertConfig.custom_experts, + model: options.model, + temperature: options.temperature, + }, + { + onExpertStart: () => { + setProgress((prev) => ({ + ...prev, + step: 'expert', + message: `正在組建專家團隊...`, + })); + }, + onExpertComplete: (expertsData) => { + categoryExperts = expertsData; + setExperts(expertsData); + setProgress((prev) => ({ + ...prev, + experts: expertsData, + message: `專家團隊組建完成(${expertsData.length}位專家)`, + })); + }, + onKeywordStart: () => { + setProgress((prev) => ({ + ...prev, + step: 'keyword', + message: `專家團隊為「${category.name}」的屬性生成關鍵字...`, + })); + }, + onKeywordProgress: (data) => { + setProgress((prev) => ({ + ...prev, + currentAttribute: data.attribute, + message: `為「${data.attribute}」生成了 ${data.count} 個關鍵字`, + })); + }, + onKeywordComplete: (totalKeywords) => { + setProgress((prev) => ({ + ...prev, + message: `共生成了 ${totalKeywords} 個專家關鍵字`, + })); + }, + onDescriptionStart: () => { + setProgress((prev) => ({ + ...prev, + step: 'description', + message: `為「${category.name}」的專家關鍵字生成創新描述...`, + })); + }, + onDescriptionComplete: (count) => { + setProgress((prev) => ({ + ...prev, + message: `生成了 ${count} 個創新描述`, + })); + }, + onDone: (data) => { + setProgress((prev) => ({ + ...prev, + step: 'done', + processedCategories: 
[...prev.processedCategories, category.name], + message: `「${category.name}」處理完成`, + })); + resolve({ + result: data.result, + experts: data.experts, + }); + }, + onError: (err) => { + setProgress((prev) => ({ + ...prev, + step: 'error', + error: err, + message: `處理「${category.name}」時發生錯誤`, + })); + resolve({ + result: null, + experts: categoryExperts, + }); + }, + } + ).catch((err) => { + setProgress((prev) => ({ + ...prev, + step: 'error', + error: err.message, + message: `處理「${category.name}」時發生錯誤`, + })); + resolve({ + result: null, + experts: categoryExperts, + }); + }); + }); + }, + [options.model, options.temperature] + ); + + const transformAll = useCallback( + async (input: ExpertTransformationInput) => { + setLoading(true); + setError(null); + setResults(null); + setExperts(null); + setProgress({ + step: 'idle', + currentCategory: '', + processedCategories: [], + message: '開始處理...', + }); + + const categoryResults: ExpertTransformationCategoryResult[] = []; + let globalExperts: ExpertProfile[] = []; + + // Process each category sequentially + for (const category of input.categories) { + const attributes = input.attributesByCategory[category.name] || []; + if (attributes.length === 0) continue; + + const { result, experts: categoryExperts } = await transformCategory( + input.query, + category, + attributes, + input.expertConfig + ); + + // Store global experts from first category + if (globalExperts.length === 0 && categoryExperts.length > 0) { + globalExperts = categoryExperts; + } + + if (result) { + categoryResults.push(result); + } + } + + // Build final result + const finalResult: ExpertTransformationDAGResult = { + query: input.query, + experts: globalExperts, + results: categoryResults, + }; + + setResults(finalResult); + setLoading(false); + setProgress((prev) => ({ + ...prev, + step: 'done', + message: '所有類別處理完成', + })); + + return finalResult; + }, + [transformCategory] + ); + + const clearResults = useCallback(() => { + setResults(null); + setError(null); + setExperts(null); + setProgress({ + step: 'idle', + currentCategory: '', + processedCategories: [], + message: '', + }); + }, []); + + return { + loading, + progress, + results, + error, + experts, + transformCategory, + transformAll, + clearResults, + }; +} diff --git a/frontend/src/hooks/useTransformation.ts b/frontend/src/hooks/useTransformation.ts new file mode 100644 index 0000000..5ac994a --- /dev/null +++ b/frontend/src/hooks/useTransformation.ts @@ -0,0 +1,175 @@ +import { useState, useCallback } from 'react'; +import { transformCategoryStream } from '../services/api'; +import type { + TransformationInput, + TransformationProgress, + TransformationCategoryResult, + TransformationDAGResult, + CategoryDefinition, +} from '../types'; + +interface UseTransformationOptions { + model?: string; + temperature?: number; + keywordCount?: number; +} + +export function useTransformation(options: UseTransformationOptions = {}) { + const [loading, setLoading] = useState(false); + const [progress, setProgress] = useState({ + step: 'idle', + currentCategory: '', + processedCategories: [], + message: '', + }); + const [results, setResults] = useState(null); + const [error, setError] = useState(null); + + const transformCategory = useCallback( + async ( + query: string, + category: CategoryDefinition, + attributes: string[] + ): Promise => { + return new Promise((resolve) => { + setProgress((prev) => ({ + ...prev, + step: 'keyword', + currentCategory: category.name, + message: `為「${category.name}」生成新關鍵字...`, + })); + + 
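+        // Open the SSE stream for this category; the callbacks below update progress and resolve the promise in onDone / onError / catch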
+        transformCategoryStream(
+          {
+            query,
+            category: category.name,
+            attributes,
+            model: options.model,
+            temperature: options.temperature,
+            keyword_count: options.keywordCount || 3,
+          },
+          {
+            onKeywordStart: () => {
+              setProgress((prev) => ({
+                ...prev,
+                step: 'keyword',
+                message: `為「${category.name}」生成新關鍵字...`,
+              }));
+            },
+            onKeywordComplete: (keywords) => {
+              setProgress((prev) => ({
+                ...prev,
+                message: `生成了 ${keywords.length} 個新關鍵字`,
+              }));
+            },
+            onDescriptionStart: () => {
+              setProgress((prev) => ({
+                ...prev,
+                step: 'description',
+                message: `為「${category.name}」生成創新描述...`,
+              }));
+            },
+            onDescriptionComplete: (count) => {
+              setProgress((prev) => ({
+                ...prev,
+                message: `生成了 ${count} 個創新描述`,
+              }));
+            },
+            onDone: (result) => {
+              setProgress((prev) => ({
+                ...prev,
+                step: 'done',
+                processedCategories: [...prev.processedCategories, category.name],
+                message: `「${category.name}」處理完成`,
+              }));
+              resolve(result);
+            },
+            onError: (err) => {
+              setProgress((prev) => ({
+                ...prev,
+                step: 'error',
+                error: err,
+                message: `處理「${category.name}」時發生錯誤`,
+              }));
+              resolve(null);
+            },
+          }
+        ).catch((err) => {
+          setProgress((prev) => ({
+            ...prev,
+            step: 'error',
+            error: err.message,
+            message: `處理「${category.name}」時發生錯誤`,
+          }));
+          resolve(null);
+        });
+      });
+    },
+    [options.model, options.temperature, options.keywordCount]
+  );
+
+  const transformAll = useCallback(
+    async (input: TransformationInput) => {
+      setLoading(true);
+      setError(null);
+      setResults(null);
+      setProgress({
+        step: 'idle',
+        currentCategory: '',
+        processedCategories: [],
+        message: '開始處理...',
+      });
+
+      const categoryResults: TransformationCategoryResult[] = [];
+
+      // Process each category sequentially
+      for (const category of input.categories) {
+        const attributes = input.attributesByCategory[category.name] || [];
+        if (attributes.length === 0) continue;
+
+        const result = await transformCategory(input.query, category, attributes);
+        if (result) {
+          categoryResults.push(result);
+        }
+      }
+
+      // Build final result
+      const finalResult: TransformationDAGResult = {
+        query: input.query,
+        results: categoryResults,
+      };
+
+      setResults(finalResult);
+      setLoading(false);
+      setProgress((prev) => ({
+        ...prev,
+        step: 'done',
+        message: '所有類別處理完成',
+      }));
+
+      return finalResult;
+    },
+    [transformCategory]
+  );
+
+  const clearResults = useCallback(() => {
+    setResults(null);
+    setError(null);
+    setProgress({
+      step: 'idle',
+      currentCategory: '',
+      processedCategories: [],
+      message: '',
+    });
+  }, []);
+
+  return {
+    loading,
+    progress,
+    results,
+    error,
+    transformCategory,
+    transformAll,
+    clearResults,
+  };
+}
diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts
index 663d7c6..015fd96 100644
--- a/frontend/src/services/api.ts
+++ b/frontend/src/services/api.ts
@@ -5,7 +5,12 @@ import type {
   Step0Result,
   CategoryDefinition,
   DynamicStep1Result,
-  DAGStreamAnalyzeResponse
+  DAGStreamAnalyzeResponse,
+  TransformationRequest,
+  TransformationCategoryResult,
+  ExpertTransformationRequest,
+  ExpertTransformationCategoryResult,
+  ExpertProfile
 } from '../types';
 
 // 自動使用當前瀏覽器的 hostname,支援遠端存取
@@ -114,3 +119,183 @@ export async function getModels(): Promise {
 
   return response.json();
 }
+
+// ===== Transformation Agent API =====
+
+export interface TransformationSSECallbacks {
+  onKeywordStart?: () => void;
+  onKeywordComplete?: (keywords: string[]) => void;
+  onDescriptionStart?: () => void;
+  onDescriptionComplete?: (count: number) => void;
+  onDone?: (result: TransformationCategoryResult) => void;
+  onError?: (error: string) => void;
+}
+
+export async function transformCategoryStream(
+  request: TransformationRequest,
+  callbacks: TransformationSSECallbacks
+): Promise<void> {
+  const response = await fetch(`${API_BASE_URL}/transformation/category`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify(request),
+  });
+
+  if (!response.ok) {
+    throw new Error(`API error: ${response.statusText}`);
+  }
+
+  const reader = response.body?.getReader();
+  if (!reader) {
+    throw new Error('No response body');
+  }
+
+  const decoder = new TextDecoder();
+  let buffer = '';
+
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) break;
+
+    buffer += decoder.decode(value, { stream: true });
+
+    // 解析 SSE 事件
+    const lines = buffer.split('\n\n');
+    buffer = lines.pop() || '';
+
+    for (const chunk of lines) {
+      if (!chunk.trim()) continue;
+
+      const eventMatch = chunk.match(/event: (\w+)/);
+      const dataMatch = chunk.match(/data: (.+)/s);
+
+      if (eventMatch && dataMatch) {
+        const eventType = eventMatch[1];
+        try {
+          const eventData = JSON.parse(dataMatch[1]);
+
+          switch (eventType) {
+            case 'keyword_start':
+              callbacks.onKeywordStart?.();
+              break;
+            case 'keyword_complete':
+              callbacks.onKeywordComplete?.(eventData.keywords);
+              break;
+            case 'description_start':
+              callbacks.onDescriptionStart?.();
+              break;
+            case 'description_complete':
+              callbacks.onDescriptionComplete?.(eventData.count);
+              break;
+            case 'done':
+              callbacks.onDone?.(eventData.result);
+              break;
+            case 'error':
+              callbacks.onError?.(eventData.error);
+              break;
+          }
+        } catch (e) {
+          console.error('Failed to parse SSE event:', e, chunk);
+        }
+      }
+    }
+  }
+}
+
+// ===== Expert Transformation Agent API =====
+
+export interface ExpertTransformationSSECallbacks {
+  onExpertStart?: () => void;
+  onExpertComplete?: (experts: ExpertProfile[]) => void;
+  onKeywordStart?: () => void;
+  onKeywordProgress?: (data: { attribute: string; count: number }) => void;
+  onKeywordComplete?: (totalKeywords: number) => void;
+  onDescriptionStart?: () => void;
+  onDescriptionComplete?: (count: number) => void;
+  onDone?: (data: { result: ExpertTransformationCategoryResult; experts: ExpertProfile[] }) => void;
+  onError?: (error: string) => void;
+}
+
+export async function expertTransformCategoryStream(
+  request: ExpertTransformationRequest,
+  callbacks: ExpertTransformationSSECallbacks
+): Promise<void> {
+  const response = await fetch(`${API_BASE_URL}/expert-transformation/category`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify(request),
+  });
+
+  if (!response.ok) {
+    throw new Error(`API error: ${response.statusText}`);
+  }
+
+  const reader = response.body?.getReader();
+  if (!reader) {
+    throw new Error('No response body');
+  }
+
+  const decoder = new TextDecoder();
+  let buffer = '';
+
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) break;
+
+    buffer += decoder.decode(value, { stream: true });
+
+    // 解析 SSE 事件
+    const lines = buffer.split('\n\n');
+    buffer = lines.pop() || '';
+
+    for (const chunk of lines) {
+      if (!chunk.trim()) continue;
+
+      const eventMatch = chunk.match(/event: (\w+)/);
+      const dataMatch = chunk.match(/data: (.+)/s);
+
+      if (eventMatch && dataMatch) {
+        const eventType = eventMatch[1];
+        try {
+          const eventData = JSON.parse(dataMatch[1]);
+
+          switch (eventType) {
+            case 'expert_start':
+              callbacks.onExpertStart?.();
+              break;
+            case 'expert_complete':
+              callbacks.onExpertComplete?.(eventData.experts);
+              break;
+            case 'keyword_start':
+              callbacks.onKeywordStart?.();
+              break;
+            case 'keyword_progress':
+              callbacks.onKeywordProgress?.(eventData);
+              break;
+            case 'keyword_complete':
+              callbacks.onKeywordComplete?.(eventData.total_keywords);
+              break;
+            case 'description_start':
+              callbacks.onDescriptionStart?.();
+              break;
+            case 'description_complete':
+              callbacks.onDescriptionComplete?.(eventData.count);
+              break;
+            case 'done':
+              callbacks.onDone?.(eventData);
+              break;
+            case 'error':
+              callbacks.onError?.(eventData.error);
+              break;
+          }
+        } catch (e) {
+          console.error('Failed to parse SSE event:', e, chunk);
+        }
+      }
+    }
+  }
+}
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts
index 0f5a13b..6564381 100644
--- a/frontend/src/types/index.ts
+++ b/frontend/src/types/index.ts
@@ -151,3 +151,113 @@ export interface DAGStreamAnalyzeResponse {
   relationships: DAGRelationship[];
   dag: AttributeDAG;
 }
+
+// ===== Transformation Agent types =====
+
+export interface TransformationRequest {
+  query: string;
+  category: string;
+  attributes: string[];
+  model?: string;
+  temperature?: number;
+  keyword_count?: number;
+}
+
+export interface TransformationDescription {
+  keyword: string;
+  description: string;
+}
+
+export interface TransformationCategoryResult {
+  category: string;
+  original_attributes: string[];
+  new_keywords: string[];
+  descriptions: TransformationDescription[];
+}
+
+export interface TransformationDAGResult {
+  query: string;
+  results: TransformationCategoryResult[];
+}
+
+export interface TransformationProgress {
+  step: 'idle' | 'keyword' | 'description' | 'done' | 'error';
+  currentCategory: string;
+  processedCategories: string[];
+  message: string;
+  error?: string;
+}
+
+export interface TransformationInput {
+  query: string;
+  categories: CategoryDefinition[];
+  attributesByCategory: Record<string, string[]>;
+}
+
+// ===== Expert Transformation Agent types =====
+
+export interface ExpertProfile {
+  id: string; // "expert-0"
+  name: string; // "藥師"
+  domain: string; // "醫療與健康"
+  perspective?: string; // "從藥物與健康管理角度思考"
+}
+
+export interface ExpertKeyword {
+  keyword: string;
+  expert_id: string;
+  expert_name: string;
+  source_attribute: string; // 來自哪個原始屬性
+}
+
+export interface ExpertTransformationDescription {
+  keyword: string;
+  expert_id: string;
+  expert_name: string;
+  description: string;
+}
+
+export interface ExpertTransformationCategoryResult {
+  category: string;
+  original_attributes: string[];
+  expert_keywords: ExpertKeyword[];
+  descriptions: ExpertTransformationDescription[];
+}
+
+export interface ExpertTransformationDAGResult {
+  query: string;
+  experts: ExpertProfile[];
+  results: ExpertTransformationCategoryResult[];
+}
+
+export interface ExpertTransformationRequest {
+  query: string;
+  category: string;
+  attributes: string[];
+  expert_count: number; // 2-8
+  keywords_per_expert: number; // 1-3
+  custom_experts?: string[]; // ["藥師", "工程師"]
+  model?: string;
+  temperature?: number;
+}
+
+export interface ExpertTransformationProgress {
+  step: 'idle' | 'expert' | 'keyword' | 'description' | 'done' | 'error';
+  currentCategory: string;
+  processedCategories: string[];
+  experts?: ExpertProfile[];
+  currentAttribute?: string;
+  message: string;
+  error?: string;
+}
+
+export interface ExpertTransformationInput {
+  query: string;
+  categories: CategoryDefinition[];
+  attributesByCategory: Record<string, string[]>;
+  expertConfig: {
+    expert_count: number;
+    keywords_per_expert: number;
+    custom_experts?: string[];
+  };
+}
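
Usage note (not part of the patch above): a minimal sketch of how a component might consume the new useTransformation hook. The component name, prop shape, import paths, and markup below are illustrative assumptions; only useTransformation and the TransformationInput type come from the files added in this diff.

// Hypothetical consumer sketch — TransformationDemo and its prop are assumptions.
import { useTransformation } from '../hooks/useTransformation';
import type { TransformationInput } from '../types';

export function TransformationDemo({ input }: { input: TransformationInput }) {
  // Runs all categories sequentially and exposes streaming progress + final results.
  const { loading, progress, results, transformAll } = useTransformation({ keywordCount: 3 });

  return (
    <div>
      <button disabled={loading} onClick={() => transformAll(input)}>
        Transform
      </button>
      <p>{progress.message}</p>
      {results?.results.map((r) => (
        <ul key={r.category}>
          {r.descriptions.map((d) => (
            <li key={d.keyword}>
              {r.category} / {d.keyword}: {d.description}
            </li>
          ))}
        </ul>
      ))}
    </div>
  );
}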