feat: add bilingual (zh/en) prompt support, language config module, and Google Patents search endpoints

This commit is contained in:
2026-01-05 22:32:08 +08:00
parent bc281b8e0a
commit ec48709755
42 changed files with 5576 additions and 254 deletions

View File

@@ -3,10 +3,11 @@ from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routers import attributes, transformation, expert_transformation, deduplication
from .routers import attributes, transformation, expert_transformation, deduplication, patent_search
from .services.llm_service import ollama_provider
from .services.embedding_service import embedding_service
from .services.llm_deduplication_service import llm_deduplication_service
from .services.patent_search_service import patent_search_service
@asynccontextmanager
@@ -15,6 +16,7 @@ async def lifespan(app: FastAPI):
await ollama_provider.close()
await embedding_service.close()
await llm_deduplication_service.close()
await patent_search_service.close()
app = FastAPI(
@@ -36,6 +38,7 @@ app.include_router(attributes.router)
app.include_router(transformation.router)
app.include_router(expert_transformation.router)
app.include_router(deduplication.router)
app.include_router(patent_search.router)
@app.get("/")

View File

@@ -1,7 +1,10 @@
from pydantic import BaseModel
from typing import Optional, List, Dict
from typing import Optional, List, Dict, Literal
from enum import Enum
# Language type for prompts
LanguageType = Literal["zh", "en"]
class AttributeNode(BaseModel):
name: str
@@ -47,16 +50,19 @@ class CausalChain(BaseModel):
class StreamAnalyzeRequest(BaseModel):
"""多步驟分析請求(更新為支持動態類別)"""
"""Multi-step analysis request (updated to support dynamic categories)"""
query: str
model: Optional[str] = None
temperature: Optional[float] = 0.7
chain_count: int = 5 # 用戶可設定要生成多少條因果鏈
chain_count: int = 5 # User can set how many causal chains to generate
# 新增:動態類別支持
category_mode: Optional[str] = "dynamic_auto" # CategoryMode enum
# Dynamic category support
category_mode: Optional[str] = "dynamic_auto" # CategoryMode enum value
custom_categories: Optional[List[str]] = None
suggested_category_count: int = 3 # 建議 LLM 生成的類別數量
suggested_category_count: int = 3 # Suggest LLM to generate this many categories
# Language setting
lang: LanguageType = "zh"
class StreamAnalyzeResponse(BaseModel):
@@ -136,13 +142,14 @@ class DAGRelationship(BaseModel):
# ===== Transformation Agent schemas =====
class TransformationRequest(BaseModel):
"""Transformation Agent 請求"""
query: str # 原始查詢 (e.g., "腳踏車")
category: str # 類別名稱 (e.g., "功能")
attributes: List[str] # 該類別的屬性列表
"""Transformation Agent request"""
query: str # Original query (e.g., "bicycle")
category: str # Category name (e.g., "Functions")
attributes: List[str] # Attribute list for this category
model: Optional[str] = None
temperature: Optional[float] = 0.7
keyword_count: int = 3 # 要生成的新關鍵字數量
keyword_count: int = 3 # Number of new keywords to generate
lang: LanguageType = "zh" # Language for prompts
class TransformationDescription(BaseModel):
@@ -215,24 +222,27 @@ class ExpertSource(str, Enum):
class ExpertTransformationRequest(BaseModel):
"""Expert Transformation Agent 請求"""
"""Expert Transformation Agent request"""
query: str
category: str
attributes: List[str]
# Expert parameters
expert_count: int = 3 # 專家數量 (2-8)
keywords_per_expert: int = 1 # 每個專家為每個屬性生成幾個關鍵字 (1-3)
custom_experts: Optional[List[str]] = None # 用戶指定專家 ["藥師", "工程師"]
expert_count: int = 3 # Number of experts (2-8)
keywords_per_expert: int = 1 # Keywords per expert per attribute (1-3)
custom_experts: Optional[List[str]] = None # User-specified experts
# Expert source parameters
expert_source: ExpertSource = ExpertSource.LLM # 專家來源
expert_language: str = "en" # 外部來源的語言 (目前只有英文資料)
expert_source: ExpertSource = ExpertSource.LLM # Expert source
expert_language: str = "en" # Language for external sources
# LLM parameters
model: Optional[str] = None
temperature: Optional[float] = 0.7
# Prompt language
lang: LanguageType = "zh"
# ===== Deduplication Agent schemas =====
@@ -243,11 +253,12 @@ class DeduplicationMethod(str, Enum):
class DeduplicationRequest(BaseModel):
"""去重請求"""
"""Deduplication request"""
descriptions: List[ExpertTransformationDescription]
method: DeduplicationMethod = DeduplicationMethod.EMBEDDING # 去重方法
similarity_threshold: float = 0.85 # 餘弦相似度閾值 (0.0-1.0),僅 Embedding 使用
model: Optional[str] = None # Embedding/LLM 模型
method: DeduplicationMethod = DeduplicationMethod.EMBEDDING # Deduplication method
similarity_threshold: float = 0.85 # Cosine similarity threshold (0.0-1.0), only for Embedding
model: Optional[str] = None # Embedding/LLM model
lang: LanguageType = "zh" # Prompt language (for LLM method)
class DescriptionGroup(BaseModel):

View File

@@ -1,20 +1,36 @@
from typing import List, Optional, Dict
import json
DEFAULT_CATEGORIES = ["材料", "功能", "用途", "使用族群", "特性"]
CATEGORY_DESCRIPTIONS = {
"材料": "物件由什麼材料組成",
"功能": "物件能做什麼",
"用途": "物件在什麼場景使用",
"使用族群": "誰會使用這個物件",
"特性": "物件有什麼特徵",
}
from .language_config import (
LanguageType,
DEFAULT_CATEGORIES,
CATEGORY_DESCRIPTIONS,
)
def get_attribute_prompt(query: str, categories: Optional[List[str]] = None) -> str:
def get_default_categories(lang: LanguageType = "zh") -> List[str]:
return DEFAULT_CATEGORIES.get(lang, DEFAULT_CATEGORIES["zh"])
def get_category_descriptions(lang: LanguageType = "zh") -> Dict[str, str]:
return CATEGORY_DESCRIPTIONS.get(lang, CATEGORY_DESCRIPTIONS["zh"])
def get_attribute_prompt(
query: str,
categories: Optional[List[str]] = None,
lang: LanguageType = "zh"
) -> str:
"""Generate prompt with causal chain structure."""
if lang == "en":
prompt = f"""Analyze the attributes of "{query}" in a causal chain format: Materials→Functions→Usages→User Groups.
List 3-5 types of materials, each extending into a complete causal chain.
JSON format:
{{"name": "{query}", "children": [{{"name": "Material Name", "category": "Materials", "children": [{{"name": "Function Name", "category": "Functions", "children": [{{"name": "Usage Name", "category": "Usages", "children": [{{"name": "User Group Name", "category": "User Groups"}}]}}]}}]}}]}}
Return JSON only."""
else:
prompt = f"""分析「{query}」的屬性,以因果鏈方式呈現:材料→功能→用途→使用族群。
請列出 3-5 種材料,每種材料延伸出完整因果鏈。
@@ -27,8 +43,17 @@ JSON 格式:
return prompt
def get_step1_attributes_prompt(query: str) -> str:
"""Step 1: 生成各類別的屬性列表(平行結構)"""
def get_step1_attributes_prompt(query: str, lang: LanguageType = "zh") -> str:
"""Step 1: Generate attribute list for each category (parallel structure)"""
if lang == "en":
return f"""/no_think
Analyze "{query}" and list attributes for the following four categories. List 3-5 common attributes for each category.
Return JSON only, in the following format:
{{"materials": ["material1", "material2", "material3"], "functions": ["function1", "function2", "function3"], "usages": ["usage1", "usage2", "usage3"], "users": ["user group1", "user group2", "user group3"]}}
Object: {query}"""
else:
return f"""/no_think
分析「{query}」,列出以下四個類別的屬性。每個類別列出 3-5 個常見屬性。
@@ -45,10 +70,38 @@ def get_step2_causal_chain_prompt(
usages: List[str],
users: List[str],
existing_chains: List[dict],
chain_index: int
chain_index: int,
lang: LanguageType = "zh"
) -> str:
"""Step 2: 生成單條因果鏈"""
"""Step 2: Generate a single causal chain"""
existing_chains_text = ""
if lang == "en":
if existing_chains:
chains_list = [
f"- {c['material']}{c['function']}{c['usage']}{c['user']}"
for c in existing_chains
]
existing_chains_text = f"""
[Already generated causal chains, do not repeat]
{chr(10).join(chains_list)}
"""
return f"""/no_think
Generate causal chain #{chain_index} for "{query}".
[Available Materials] {', '.join(materials)}
[Available Functions] {', '.join(functions)}
[Available Usages] {', '.join(usages)}
[Available User Groups] {', '.join(users)}
{existing_chains_text}
[Rules]
1. Select one attribute from each category to form a logical causal chain
2. The causal relationship must be logical (materials determine functions, functions determine usages, usages determine user groups)
3. Do not repeat existing causal chains
Return JSON only:
{{"material": "selected material", "function": "selected function", "usage": "selected usage", "user": "selected user group"}}"""
else:
if existing_chains:
chains_list = [
f"- {c['material']}{c['function']}{c['usage']}{c['user']}"
@@ -58,7 +111,6 @@ def get_step2_causal_chain_prompt(
【已生成的因果鏈,請勿重複】
{chr(10).join(chains_list)}
"""
return f"""/no_think
為「{query}」生成第 {chain_index} 條因果鏈。
@@ -76,18 +128,51 @@ def get_step2_causal_chain_prompt(
{{"material": "選擇的材料", "function": "選擇的功能", "usage": "選擇的用途", "user": "選擇的族群"}}"""
def get_flat_attribute_prompt(query: str, categories: Optional[List[str]] = None) -> str:
def get_flat_attribute_prompt(
query: str,
categories: Optional[List[str]] = None,
lang: LanguageType = "zh"
) -> str:
"""Generate prompt with flat/parallel categories (original design)."""
cats = categories if categories else DEFAULT_CATEGORIES
cats = categories if categories else get_default_categories(lang)
cat_descs = get_category_descriptions(lang)
# Build category list
category_lines = []
for cat in cats:
desc = CATEGORY_DESCRIPTIONS.get(cat, f"{cat}的相關屬性")
category_lines.append(f"- {cat}{desc}")
desc = cat_descs.get(cat, f"Related attributes of {cat}" if lang == "en" else f"{cat}的相關屬性")
category_lines.append(f"- {cat}: {desc}")
categories_text = "\n".join(category_lines)
if lang == "en":
prompt = f"""/no_think
You are an object attribute analysis expert. Please break down the user's input object into the following attribute categories.
[Required Categories]
{categories_text}
[Important] The return format must be valid JSON, and each node must have a "name" field:
```json
{{
"name": "Object Name",
"children": [
{{
"name": "Category Name",
"children": [
{{"name": "Attribute 1"}},
{{"name": "Attribute 2"}}
]
}}
]
}}
```
Return JSON only, no other text.
User input: {query}"""
else:
prompt = f"""/no_think
你是一個物件屬性分析專家。請將用戶輸入的物件拆解成以下屬性類別。
@@ -123,9 +208,37 @@ def get_flat_attribute_prompt(query: str, categories: Optional[List[str]] = None
def get_step0_category_analysis_prompt(
query: str,
suggested_count: int = 3,
exclude_categories: List[str] | None = None
exclude_categories: List[str] | None = None,
lang: LanguageType = "zh"
) -> str:
"""Step 0: LLM 分析建議類別"""
"""Step 0: LLM analyzes and suggests categories"""
if lang == "en":
exclude_text = ""
if exclude_categories:
exclude_text = f"\n[Forbidden Categories] {', '.join(exclude_categories)} (These are already fixed categories, do not suggest duplicates)\n"
return f"""/no_think
Analyze "{query}" and suggest {suggested_count} most suitable attribute categories to describe it.
[Common Category References] Characteristics, Shape, Color, Size, Brand, Price Range, Weight, Style, Occasion, Season, Technical Specifications
{exclude_text}
[Important]
1. Choose categories that best describe the essence of this object
2. Categories should have logical relationships
3. Do not choose overly abstract or duplicate categories
4. Must suggest creative categories different from the reference list
Return JSON only:
{{
"categories": [
{{"name": "Category1", "description": "Description1", "order": 0}},
{{"name": "Category2", "description": "Description2", "order": 1}}
]
}}
Object: {query}"""
else:
exclude_text = ""
if exclude_categories:
exclude_text = f"\n【禁止使用的類別】{', '.join(exclude_categories)}(這些已經是固定類別,不要重複建議)\n"
@@ -154,20 +267,34 @@ def get_step0_category_analysis_prompt(
def get_step1_dynamic_attributes_prompt(
query: str,
categories: List # List[CategoryDefinition]
categories: List, # List[CategoryDefinition]
lang: LanguageType = "zh"
) -> str:
"""動態 Step 1 - 根據類別列表生成屬性"""
# 按 order 排序並構建描述
"""Dynamic Step 1 - Generate attributes based on category list"""
# Sort by order and build description
sorted_cats = sorted(categories, key=lambda x: x.order if hasattr(x, 'order') else x.get('order', 0))
category_desc = "\n".join([
f"- {cat.name if hasattr(cat, 'name') else cat['name']}: {cat.description if hasattr(cat, 'description') else cat.get('description', '相關屬性')}"
f"- {cat.name if hasattr(cat, 'name') else cat['name']}: {cat.description if hasattr(cat, 'description') else cat.get('description', 'Related attributes' if lang == 'en' else '相關屬性')}"
for cat in sorted_cats
])
category_keys = [cat.name if hasattr(cat, 'name') else cat['name'] for cat in sorted_cats]
json_template = {cat: ["屬性1", "屬性2", "屬性3"] for cat in category_keys}
if lang == "en":
json_template = {cat: ["attribute1", "attribute2", "attribute3"] for cat in category_keys}
return f"""/no_think
Analyze "{query}" and list attributes for the following categories. List 3-5 common attributes for each category.
[Category List]
{category_desc}
Return JSON only:
{json.dumps(json_template, ensure_ascii=False, indent=2)}
Object: {query}"""
else:
json_template = {cat: ["屬性1", "屬性2", "屬性3"] for cat in category_keys}
return f"""/no_think
分析「{query}」,列出以下類別的屬性。每個類別列出 3-5 個常見屬性。
@@ -185,17 +312,46 @@ def get_step2_dynamic_causal_chain_prompt(
categories: List, # List[CategoryDefinition]
attributes_by_category: Dict[str, List[str]],
existing_chains: List[Dict[str, str]],
chain_index: int
chain_index: int,
lang: LanguageType = "zh"
) -> str:
"""動態 Step 2 - 生成動態類別的因果鏈"""
"""Dynamic Step 2 - Generate causal chains for dynamic categories"""
sorted_cats = sorted(categories, key=lambda x: x.order if hasattr(x, 'order') else x.get('order', 0))
# 構建可選屬性
# Build available attributes
available_attrs = "\n".join([
f"{cat.name if hasattr(cat, 'name') else cat['name']}{', '.join(attributes_by_category.get(cat.name if hasattr(cat, 'name') else cat['name'], []))}"
f"[{cat.name if hasattr(cat, 'name') else cat['name']}] {', '.join(attributes_by_category.get(cat.name if hasattr(cat, 'name') else cat['name'], []))}"
for cat in sorted_cats
])
if lang == "en":
# Already generated causal chains
existing_text = ""
if existing_chains:
chains_list = [
"".join([chain.get(cat.name if hasattr(cat, 'name') else cat['name'], '?') for cat in sorted_cats])
for chain in existing_chains
]
existing_text = "\n[Already generated, do not repeat]\n" + "\n".join([f"- {c}" for c in chains_list])
# JSON template
json_template = {cat.name if hasattr(cat, 'name') else cat['name']: f"selected {cat.name if hasattr(cat, 'name') else cat['name']}" for cat in sorted_cats}
return f"""/no_think
Generate causal chain #{chain_index} for "{query}".
[Available Attributes]
{available_attrs}
{existing_text}
[Rules]
1. Select one attribute from each category
2. Causal relationships must be logical
3. Do not repeat
Return JSON only:
{json.dumps(json_template, ensure_ascii=False, indent=2)}"""
else:
# 已生成的因果鏈
existing_text = ""
if existing_chains:
@@ -203,7 +359,7 @@ def get_step2_dynamic_causal_chain_prompt(
"".join([chain.get(cat.name if hasattr(cat, 'name') else cat['name'], '?') for cat in sorted_cats])
for chain in existing_chains
]
existing_text = f"\n【已生成,請勿重複】\n" + "\n".join([f"- {c}" for c in chains_list])
existing_text = "\n【已生成,請勿重複】\n" + "\n".join([f"- {c}" for c in chains_list])
# JSON 模板
json_template = {cat.name if hasattr(cat, 'name') else cat['name']: f"選擇的{cat.name if hasattr(cat, 'name') else cat['name']}" for cat in sorted_cats}
@@ -230,19 +386,45 @@ def get_step2_dag_relationships_prompt(
query: str,
categories: List, # List[CategoryDefinition]
attributes_by_category: Dict[str, List[str]],
lang: LanguageType = "zh"
) -> str:
"""生成相鄰類別之間的自然關係"""
"""Generate natural relationships between adjacent categories"""
sorted_cats = sorted(categories, key=lambda x: x.order if hasattr(x, 'order') else x.get('order', 0))
# Build attribute listing
attr_listing = "\n".join([
f"{cat.name if hasattr(cat, 'name') else cat['name']}{', '.join(attributes_by_category.get(cat.name if hasattr(cat, 'name') else cat['name'], []))}"
f"[{cat.name if hasattr(cat, 'name') else cat['name']}] {', '.join(attributes_by_category.get(cat.name if hasattr(cat, 'name') else cat['name'], []))}"
for cat in sorted_cats
])
# Build direction hints
direction_hints = "".join([cat.name if hasattr(cat, 'name') else cat['name'] for cat in sorted_cats])
if lang == "en":
return f"""/no_think
Analyze the attribute relationships of "{query}".
{attr_listing}
[Relationship Direction] {direction_hints}
[Rules]
1. Only establish relationships between adjacent categories (e.g., Materials→Functions, Functions→Usages)
2. Only output pairs that have true causal or associative relationships
3. An attribute can connect to multiple downstream attributes, or none at all
4. Not every attribute needs to have connections
5. Relationships should be reasonable and meaningful
Return JSON:
{{
"relationships": [
{{"source_category": "CategoryA", "source": "attribute name", "target_category": "CategoryB", "target": "attribute name"}},
...
]
}}
Return JSON only."""
else:
return f"""/no_think
分析「{query}」的屬性關係。

View File

@@ -1,24 +1,58 @@
"""Expert Transformation Agent 提示詞模組"""
"""Expert Transformation Agent prompts module - Bilingual support"""
from typing import List, Optional
from .language_config import LanguageType
def get_expert_generation_prompt(
query: str,
categories: List[str],
expert_count: int,
custom_experts: Optional[List[str]] = None
custom_experts: Optional[List[str]] = None,
lang: LanguageType = "zh"
) -> str:
"""Step 0: 生成專家團隊(不依賴主題,純隨機多元)"""
"""Step 0: Generate expert team (not dependent on topic, purely random and diverse)"""
import time
import random
# Add timestamp and random number for diversity
seed = int(time.time() * 1000) % 10000
if lang == "en":
custom_text = ""
if custom_experts and len(custom_experts) > 0:
custom_text = f" (Specified: {', '.join(custom_experts[:expert_count])})"
diversity_hints = [
"obscure, non-mainstream, cross-disciplinary",
"rare occupations, emerging fields, fringe disciplines",
"unconventional, innovative, niche specialties",
"future-oriented, experimental, non-traditional",
"cross-cultural, hybrid fields, unique perspectives"
]
hint = random.choice(diversity_hints)
return f"""/no_think
Randomly assemble a team of {expert_count} experts from completely different fields{custom_text}.
[Innovation Requirements] (Random seed: {seed})
- Prioritize {hint} experts
- Avoid common professions (such as doctors, engineers, teachers, lawyers, etc.)
- Each expert must be from a completely unrelated field
- The rarer and more innovative, the better
Return JSON:
{{"experts": [{{"id": "expert-0", "name": "profession", "domain": "field", "perspective": "viewpoint"}}, ...]}}
Rules:
- id should be expert-0 to expert-{expert_count - 1}
- name is the profession name (not a person's name), 2-5 words
- domain should be specific and unique, no duplicate types"""
else:
custom_text = ""
if custom_experts and len(custom_experts) > 0:
custom_text = f"(已指定:{', '.join(custom_experts[:expert_count])}"
# 加入時間戳和隨機數來增加多樣性
seed = int(time.time() * 1000) % 10000
diversity_hints = [
"冷門、非主流、跨領域",
"罕見職業、新興領域、邊緣學科",
@@ -50,12 +84,38 @@ def get_expert_keyword_generation_prompt(
category: str,
attribute: str,
experts: List[dict], # List[ExpertProfile]
keywords_per_expert: int = 1
keywords_per_expert: int = 1,
lang: LanguageType = "zh"
) -> str:
"""Step 1: 專家視角關鍵字生成"""
# 建立專家列表,格式更清晰
"""Step 1: Expert perspective keyword generation"""
# Build expert list in clearer format
experts_list = "\n".join([f"- {exp['id']}: {exp['name']}" for exp in experts])
if lang == "en":
return f"""/no_think
You need to play the role of the following experts to generate innovative keywords for an attribute:
[Expert List]
{experts_list}
[Task]
Attribute: "{attribute}" (Category: {category})
For each expert, please:
1. First understand the professional background, knowledge domain, and work content of that profession
2. Think about "{attribute}" from that profession's unique perspective
3. Generate {keywords_per_expert} innovative keyword(s) related to that specialty (2-6 words)
Keywords must reflect that expert's professional thinking style, for example:
- Accountant viewing "movement""cash flow", "cost-benefit"
- Architect viewing "movement""circulation design", "spatial flow"
- Psychologist viewing "movement""behavioral motivation", "emotional transition"
Return JSON:
{{"keywords": [{{"keyword": "term", "expert_id": "expert-X", "expert_name": "name"}}, ...]}}
Total of {len(experts) * keywords_per_expert} keywords needed, each keyword must be clearly related to the corresponding expert's professional field."""
else:
return f"""/no_think
你需要扮演以下專家,為屬性生成創新關鍵字:
@@ -86,9 +146,25 @@ def get_single_description_prompt(
keyword: str,
expert_id: str,
expert_name: str,
expert_domain: str
expert_domain: str,
lang: LanguageType = "zh"
) -> str:
"""Step 2: 為單一關鍵字生成描述"""
"""Step 2: Generate description for a single keyword"""
if lang == "en":
# If domain is generic, just use profession name
domain_text = f" ({expert_domain} field)" if expert_domain and expert_domain != "Professional Field" else ""
return f"""/no_think
You are a {expert_name}{domain_text}.
Task: Generate an innovative application description for "{query}".
Keyword: {keyword}
From your professional perspective, explain how to apply the concept of "{keyword}" to "{query}". The description should be specific, creative, 15-30 words.
Return JSON only, no other text:
{{"description": "your innovative application description"}}"""
else:
# 如果 domain 是通用的,就只用職業名稱
domain_text = f"{expert_domain}領域)" if expert_domain and expert_domain != "Professional Field" else ""

View File

@@ -0,0 +1,51 @@
"""Language configuration for prompts"""
from enum import Enum
from typing import Literal
class Language(str, Enum):
CHINESE = "zh"
ENGLISH = "en"
LanguageType = Literal["zh", "en"]
# Default categories for each language
DEFAULT_CATEGORIES = {
"zh": ["材料", "功能", "用途", "使用族群", "特性"],
"en": ["Materials", "Functions", "Usages", "User Groups", "Characteristics"],
}
CATEGORY_DESCRIPTIONS = {
"zh": {
"材料": "物件由什麼材料組成",
"功能": "物件能做什麼",
"用途": "物件在什麼場景使用",
"使用族群": "誰會使用這個物件",
"特性": "物件有什麼特徵",
},
"en": {
"Materials": "What materials the object is made of",
"Functions": "What the object can do",
"Usages": "In what scenarios the object is used",
"User Groups": "Who uses this object",
"Characteristics": "What features the object has",
},
}
# Category name mappings between languages
CATEGORY_MAPPING = {
"zh_to_en": {
"材料": "Materials",
"功能": "Functions",
"用途": "Usages",
"使用族群": "User Groups",
"特性": "Characteristics",
},
"en_to_zh": {
"Materials": "材料",
"Functions": "功能",
"Usages": "用途",
"User Groups": "使用族群",
"Characteristics": "特性",
},
}

View File

@@ -1,21 +1,42 @@
"""Transformation Agent 提示詞模組"""
"""Transformation Agent prompts module - Bilingual support"""
from typing import List
from .language_config import LanguageType
def get_keyword_generation_prompt(
category: str,
attributes: List[str],
keyword_count: int = 3
keyword_count: int = 3,
lang: LanguageType = "zh"
) -> str:
"""
Step 1: 生成新關鍵字
Step 1: Generate new keywords
給定類別和現有屬性,生成全新的、有創意的關鍵字。
不考慮原始查詢,只專注於類別本身可能的延伸。
Given a category and existing attributes, generate new, creative keywords.
Don't consider the original query, focus only on possible extensions of the category itself.
"""
attrs_text = "".join(attributes)
attrs_text = ", ".join(attributes) if lang == "en" else "".join(attributes)
if lang == "en":
return f"""/no_think
You are a creative brainstorming expert. Given a category and its existing attributes, please generate new, creative keywords or descriptive phrases.
[Category] {category}
[Existing Attributes] {attrs_text}
[Important Rules]
1. Generate {keyword_count} completely new keywords
2. Keywords must fit within the scope of "{category}" category
3. Keywords should be creative and not duplicate or be too similar to existing attributes
4. Don't consider any specific object, focus only on possible extensions of this category
5. Each keyword should be 2-6 words
Return JSON only:
{{
"keywords": ["keyword1", "keyword2", "keyword3"]
}}"""
else:
return f"""/no_think
你是一個創意發想專家。給定一個類別和該類別下的現有屬性,請生成全新的、有創意的關鍵字或描述片段。
@@ -38,13 +59,35 @@ def get_keyword_generation_prompt(
def get_description_generation_prompt(
query: str,
category: str,
keyword: str
keyword: str,
lang: LanguageType = "zh"
) -> str:
"""
Step 2: 結合原始查詢生成描述
Step 2: Combine with original query to generate description
用新關鍵字創造一個與原始查詢相關的創新應用描述。
Use new keyword to create an innovative application description related to the original query.
"""
if lang == "en":
return f"""/no_think
You are an innovation application expert. Please apply a new keyword concept to a specific object to create an innovative application description.
[Object] {query}
[Category] {category}
[New Keyword] {keyword}
[Task]
Using the concept of "{keyword}", create an innovative application description for "{query}".
The description should be a complete sentence or phrase explaining how to apply this new concept to the object.
[Example Format]
- If the object is "bicycle" and keyword is "monitor", you could generate "bicycle monitors the rider's health status"
- If the object is "umbrella" and keyword is "generate power", you could generate "umbrella generates electricity using raindrop impacts"
Return JSON only:
{{
"description": "innovative application description"
}}"""
else:
return f"""/no_think
你是一個創新應用專家。請將一個新的關鍵字概念應用到特定物件上,創造出創新的應用描述。
@@ -69,14 +112,34 @@ def get_description_generation_prompt(
def get_batch_description_prompt(
query: str,
category: str,
keywords: List[str]
keywords: List[str],
lang: LanguageType = "zh"
) -> str:
"""
批次生成描述(可選的優化版本,一次處理多個關鍵字)
Batch description generation (optional optimized version, process multiple keywords at once)
"""
keywords_text = "".join(keywords)
keywords_json = ", ".join([f'"{k}"' for k in keywords])
keywords_text = ", ".join(keywords) if lang == "en" else "".join(keywords)
if lang == "en":
return f"""/no_think
You are an innovation application expert. Please apply multiple new keyword concepts to a specific object, creating an innovative application description for each keyword.
[Object] {query}
[Category] {category}
[New Keywords] {keywords_text}
[Task]
Create an innovative application description related to "{query}" for each keyword.
Each description should be a complete sentence or phrase.
Return JSON only:
{{
"descriptions": [
{{"keyword": "keyword1", "description": "description1"}},
{{"keyword": "keyword2", "description": "description2"}}
]
}}"""
else:
return f"""/no_think
你是一個創新應用專家。請將多個新的關鍵字概念應用到特定物件上,為每個關鍵字創造創新的應用描述。

View File

@@ -58,7 +58,8 @@ async def execute_step0(
prompt = get_step0_category_analysis_prompt(
request.query,
request.suggested_category_count,
exclude_categories=exclude_categories
exclude_categories=exclude_categories,
lang=request.lang
)
temperature = request.temperature if request.temperature is not None else 0.7
response = await ollama_provider.generate(
@@ -310,7 +311,7 @@ async def generate_sse_events(request: StreamAnalyzeRequest) -> AsyncGenerator[s
# ========== Step 1: Generate Attributes (Dynamic) ==========
yield f"event: step1_start\ndata: {json.dumps({'message': '生成屬性...'}, ensure_ascii=False)}\n\n"
step1_prompt = get_step1_dynamic_attributes_prompt(request.query, final_categories)
step1_prompt = get_step1_dynamic_attributes_prompt(request.query, final_categories, lang=request.lang)
logger.info(f"Step 1 prompt: {step1_prompt[:200]}")
step1_response = await ollama_provider.generate(
@@ -330,6 +331,7 @@ async def generate_sse_events(request: StreamAnalyzeRequest) -> AsyncGenerator[s
query=request.query,
categories=final_categories,
attributes_by_category=step1_result.attributes,
lang=request.lang
)
logger.info(f"Step 2 (relationships) prompt: {step2_prompt[:300]}")

View File

@@ -63,7 +63,8 @@ async def deduplicate_descriptions(request: DeduplicationRequest) -> Deduplicati
# 使用 LLM 成對比較去重
result = await llm_deduplication_service.deduplicate(
descriptions=request.descriptions,
model=request.model
model=request.model,
lang=request.lang
)
return result
except ValueError as e:

View File

@@ -68,7 +68,8 @@ async def generate_expert_transformation_events(
query=request.query,
categories=all_categories,
expert_count=request.expert_count,
custom_experts=actual_custom_experts if actual_custom_experts else None
custom_experts=actual_custom_experts if actual_custom_experts else None,
lang=request.lang
)
logger.info(f"Expert prompt: {expert_prompt[:200]}")
@@ -119,7 +120,8 @@ async def generate_expert_transformation_events(
query=request.query,
categories=all_categories,
expert_count=request.expert_count,
custom_experts=actual_custom_experts if actual_custom_experts else None
custom_experts=actual_custom_experts if actual_custom_experts else None,
lang=request.lang
)
expert_response = await ollama_provider.generate(
@@ -160,7 +162,8 @@ async def generate_expert_transformation_events(
category=request.category,
attribute=attribute,
experts=[e.model_dump() for e in experts],
keywords_per_expert=request.keywords_per_expert
keywords_per_expert=request.keywords_per_expert,
lang=request.lang
)
logger.info(f"Keyword prompt for '{attribute}': {kw_prompt[:300]}")
@@ -214,7 +217,8 @@ async def generate_expert_transformation_events(
keyword=kw.keyword,
expert_id=kw.expert_id,
expert_name=kw.expert_name,
expert_domain=expert_domain
expert_domain=expert_domain,
lang=request.lang
)
desc_response = await ollama_provider.generate(

View File

@@ -0,0 +1,133 @@
"""Patent Search Router - Search for similar patents"""
import logging
from typing import Optional, List
from fastapi import APIRouter
from pydantic import BaseModel
from ..services.patent_search_service import patent_search_service
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/patent", tags=["patent"])
# ===== Request/Response Models =====
class PatentSearchRequest(BaseModel):
    """Patent search request body for POST /api/patent/search."""
    query: str  # Search query (description or keywords)
    max_results: int = 10  # Maximum results to return; endpoint clamps this to 1-20
class PatentResult(BaseModel):
    """Single patent result as returned by the patent search service."""
    publication_number: str  # e.g. patent office publication identifier
    title: str
    snippet: str  # Short excerpt matched against the query
    publication_date: Optional[str] = None  # string-typed; format depends on upstream source — TODO confirm
    assignee: Optional[str] = None
    inventor: Optional[str] = None
    status: str  # One of: ACTIVE, NOT_ACTIVE, UNKNOWN
    pdf_url: Optional[str] = None
    thumbnail_url: Optional[str] = None
class PatentSearchResponse(BaseModel):
    """Patent search response for a single query."""
    query: str  # Echo of the request query
    total_results: int
    patents: List[PatentResult]
    error: Optional[str] = None  # Populated when the underlying search failed
class BatchPatentSearchRequest(BaseModel):
    """Batch patent search request - search multiple descriptions in one call."""
    queries: List[str]  # List of descriptions to search
    max_results_per_query: int = 5  # Max results per query; endpoint clamps this to 1-10
class BatchPatentSearchResult(BaseModel):
    """Results for a single query within a batch search."""
    query: str  # Echo of the individual query
    total_results: int
    patents: List[PatentResult]
    error: Optional[str] = None  # Per-query failure, if any
class BatchPatentSearchResponse(BaseModel):
    """Batch patent search response: one result entry per input query."""
    results: List[BatchPatentSearchResult]
    total_queries: int  # Number of queries processed (== len(results))
# ===== Endpoints =====
@router.post("/search", response_model=PatentSearchResponse)
async def search_patents(request: PatentSearchRequest):
"""
Search for patents similar to the given description/query.
Uses Google Patents to find related patents based on keywords.
"""
logger.info(f"Patent search request: {request.query[:100]}...")
# Limit max_results to reasonable range
max_results = min(max(1, request.max_results), 20)
result = await patent_search_service.search(
query=request.query,
max_results=max_results,
)
return PatentSearchResponse(
query=request.query,
total_results=result.get("total_results", 0),
patents=[PatentResult(**p) for p in result.get("patents", [])],
error=result.get("error"),
)
@router.post("/search/batch", response_model=BatchPatentSearchResponse)
async def batch_search_patents(request: BatchPatentSearchRequest):
    """
    Search for patents for multiple descriptions at once.

    Useful for checking multiple creative descriptions against patents.
    Queries are processed sequentially; per-query failures are reported in
    the corresponding result's error field rather than failing the batch.
    """
    logger.info(f"Batch patent search: {len(request.queries)} queries")

    # Clamp the per-query result count into the supported window.
    per_query_cap = max(1, min(request.max_results_per_query, 10))

    batch_results: List[BatchPatentSearchResult] = []
    for description in request.queries:
        outcome = await patent_search_service.search(
            query=description,
            max_results=per_query_cap,
        )
        batch_results.append(
            BatchPatentSearchResult(
                query=description,
                total_results=outcome.get("total_results", 0),
                patents=[PatentResult(**entry) for entry in outcome.get("patents", [])],
                error=outcome.get("error"),
            )
        )

    return BatchPatentSearchResponse(
        results=batch_results,
        total_queries=len(request.queries),
    )
@router.get("/health")
async def patent_search_health():
    """Check if patent search service is working.

    Issues a minimal probe query against the search backend and reports
    unhealthy (with the error message) if the probe fails.
    """
    probe = await patent_search_service.search("test", max_results=1)
    failure = probe.get("error")
    if failure:
        return {"status": "unhealthy", "error": failure}
    return {"status": "healthy"}

View File

@@ -36,7 +36,8 @@ async def generate_transformation_events(
keyword_prompt = get_keyword_generation_prompt(
category=request.category,
attributes=request.attributes,
keyword_count=request.keyword_count
keyword_count=request.keyword_count,
lang=request.lang
)
logger.info(f"Keyword prompt: {keyword_prompt[:200]}")
@@ -61,7 +62,8 @@ async def generate_transformation_events(
desc_prompt = get_batch_description_prompt(
query=request.query,
category=request.category,
keywords=new_keywords
keywords=new_keywords,
lang=request.lang
)
logger.info(f"Description prompt: {desc_prompt[:300]}")

View File

@@ -1,12 +1,12 @@
"""
LLM Deduplication Service - 使用 LLM 成對比較進行去重
LLM Deduplication Service - Using LLM pairwise comparison for deduplication
LLM 判斷兩個描述是否語意重複,透過並行處理加速。
Let LLM determine whether two descriptions are semantically duplicate, accelerated by parallel processing.
"""
import asyncio
import logging
from typing import List, Tuple, Optional
from typing import List, Tuple, Optional, Literal
import httpx
import numpy as np
@@ -18,6 +18,7 @@ from ..models.schemas import (
DeduplicationMethod,
DescriptionGroup,
)
from ..prompts.language_config import LanguageType
logger = logging.getLogger(__name__)
@@ -31,27 +32,20 @@ class LLMDeduplicationService:
self.client = httpx.AsyncClient(timeout=60.0)
self.max_concurrent = 5 # 最大並行數,避免 Ollama 過載
async def compare_pair(
self,
desc1: str,
desc2: str,
model: str,
semaphore: asyncio.Semaphore
) -> bool:
"""
讓 LLM 判斷兩個描述是否語意重複
def _get_comparison_prompt(self, desc1: str, desc2: str, lang: LanguageType = "zh") -> str:
"""Get comparison prompt in the specified language"""
if lang == "en":
return f"""Determine whether the following two innovative descriptions express the same or very similar concepts:
Args:
desc1: 第一個描述
desc2: 第二個描述
model: LLM 模型名稱
semaphore: 並行控制信號量
Description 1: {desc1}
Returns:
bool: 是否為重複描述
"""
async with semaphore: # 控制並行數
prompt = f"""判斷以下兩個創新描述是否表達相同或非常相似的概念:
Description 2: {desc2}
If both descriptions essentially express the same or very similar innovative concept, answer "YES"
If the two descriptions express different innovative concepts, answer "NO"
Only answer YES or NO, no other text"""
else:
return f"""判斷以下兩個創新描述是否表達相同或非常相似的概念:
描述1: {desc1}
@@ -61,6 +55,30 @@ class LLMDeduplicationService:
如果兩者描述不同的創新概念,回答 "NO"
只回答 YES 或 NO不要其他文字"""
async def compare_pair(
self,
desc1: str,
desc2: str,
model: str,
semaphore: asyncio.Semaphore,
lang: LanguageType = "zh"
) -> bool:
"""
Let LLM determine whether two descriptions are semantically duplicate
Args:
desc1: First description
desc2: Second description
model: LLM model name
semaphore: Concurrency control semaphore
lang: Language for the prompt
Returns:
bool: Whether the descriptions are duplicates
"""
async with semaphore: # Control concurrency
prompt = self._get_comparison_prompt(desc1, desc2, lang)
try:
response = await self.client.post(
f"{self.base_url}/api/generate",
@@ -86,26 +104,28 @@ class LLMDeduplicationService:
async def compare_batch(
self,
pairs: List[Tuple[int, int, str, str]],
model: str
model: str,
lang: LanguageType = "zh"
) -> List[Tuple[int, int, bool]]:
"""
並行批次比較多個描述對
Parallel batch comparison of multiple description pairs
Args:
pairs: 待比較的配對列表 [(i, j, desc1, desc2), ...]
model: LLM 模型名稱
pairs: List of pairs to compare [(i, j, desc1, desc2), ...]
model: LLM model name
lang: Language for the prompt
Returns:
比較結果列表 [(i, j, is_similar), ...]
List of comparison results [(i, j, is_similar), ...]
"""
semaphore = asyncio.Semaphore(self.max_concurrent)
async def compare_one(pair: Tuple[int, int, str, str]) -> Tuple[int, int, bool]:
i, j, desc1, desc2 = pair
is_similar = await self.compare_pair(desc1, desc2, model, semaphore)
is_similar = await self.compare_pair(desc1, desc2, model, semaphore, lang)
return (i, j, is_similar)
# 使用 asyncio.gather 並行執行所有比較
# Use asyncio.gather to execute all comparisons in parallel
results = await asyncio.gather(*[compare_one(p) for p in pairs])
return results
@@ -144,17 +164,19 @@ class LLMDeduplicationService:
async def deduplicate(
self,
descriptions: List[ExpertTransformationDescription],
model: Optional[str] = None
model: Optional[str] = None,
lang: LanguageType = "zh"
) -> DeduplicationResult:
"""
使用 LLM 成對比較進行去重
Use LLM pairwise comparison for deduplication
Args:
descriptions: 要去重的描述列表
model: LLM 模型名稱
descriptions: List of descriptions to deduplicate
model: LLM model name
lang: Language for the prompt
Returns:
DeduplicationResult: 去重結果
DeduplicationResult: Deduplication result
"""
model = model or self.default_model
@@ -188,10 +210,10 @@ class LLMDeduplicationService:
))
total_pairs = len(pairs)
logger.info(f"LLM deduplication: {total_pairs} pairs to compare (parallel={self.max_concurrent}, model={model})")
logger.info(f"LLM deduplication: {total_pairs} pairs to compare (parallel={self.max_concurrent}, model={model}, lang={lang})")
# 並行批次比較
results = await self.compare_batch(pairs, model)
# Parallel batch comparison
results = await self.compare_batch(pairs, model, lang)
# 填入相似度矩陣
for i, j, is_similar in results:

View File

@@ -0,0 +1,195 @@
"""Patent Search Service using Google Patents XHR API"""
import html
import logging
from dataclasses import dataclass
from typing import List, Optional
from urllib.parse import quote_plus

import httpx
logger = logging.getLogger(__name__)
# eq=False keeps the original identity-based equality/hashability; the
# dataclass only replaces the hand-written __init__ boilerplate (same
# field names, order, and defaults, so all existing call sites work).
@dataclass(eq=False)
class PatentSearchResult:
    """Single patent search result parsed from the Google Patents XHR API.

    pdf_url / thumbnail_url are absolute URLs built by the service, or
    None when the API did not supply the corresponding asset path.
    """

    publication_number: str
    title: str
    snippet: str
    publication_date: Optional[str]
    assignee: Optional[str]
    inventor: Optional[str]
    status: str  # ACTIVE, NOT_ACTIVE, UNKNOWN
    pdf_url: Optional[str] = None
    thumbnail_url: Optional[str] = None

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict with all result fields."""
        return {
            "publication_number": self.publication_number,
            "title": self.title,
            "snippet": self.snippet,
            "publication_date": self.publication_date,
            "assignee": self.assignee,
            "inventor": self.inventor,
            "status": self.status,
            "pdf_url": self.pdf_url,
            "thumbnail_url": self.thumbnail_url,
        }
class PatentSearchService:
    """Service for searching patents using Google Patents.

    Talks to the XHR endpoint that backs patents.google.com, using a
    browser-like header set to avoid being blocked.
    """

    GOOGLE_PATENTS_XHR_URL = "https://patents.google.com/xhr/query"
    GOOGLE_PATENTS_PDF_BASE = "https://patentimages.storage.googleapis.com/"

    # Browser-like headers to avoid being blocked
    DEFAULT_HEADERS = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "en-US,en;q=0.9",
        "Referer": "https://patents.google.com/",
        "Origin": "https://patents.google.com",
    }

    def __init__(self):
        # Created lazily on first use so the module-level singleton can be
        # constructed without an event loop.
        self._client: Optional[httpx.AsyncClient] = None

    async def _get_client(self) -> httpx.AsyncClient:
        """Return the shared AsyncClient, (re)creating it if missing or closed."""
        if self._client is None or self._client.is_closed:
            self._client = httpx.AsyncClient(
                timeout=30.0,
                headers=self.DEFAULT_HEADERS,
                follow_redirects=True,
            )
        return self._client

    async def close(self):
        """Close the underlying HTTP client (called on application shutdown)."""
        if self._client and not self._client.is_closed:
            await self._client.aclose()

    async def search(
        self,
        query: str,
        max_results: int = 10,
    ) -> dict:
        """
        Search Google Patents for relevant patents

        Args:
            query: Search query (can be a description or keywords)
            max_results: Maximum number of results to return

        Returns:
            Dict with total_results count and list of patent results; on
            failure an "error" key is present and the patent list is empty.
        """
        try:
            client = await self._get_client()

            # URL encode the query: the XHR endpoint expects the whole
            # "q=<query>" expression itself percent-encoded in the url param.
            encoded_query = quote_plus(query)
            url = f"{self.GOOGLE_PATENTS_XHR_URL}?url=q%3D{encoded_query}&exp=&tags="

            logger.info(f"Searching patents with query: {query[:100]}...")
            response = await client.get(url)

            if response.status_code != 200:
                logger.error(f"Google Patents API returned status {response.status_code}")
                return {
                    "total_results": 0,
                    "patents": [],
                    "error": f"API returned status {response.status_code}"
                }

            data = response.json()

            # Parse results
            results = data.get("results", {})
            total_num = results.get("total_num_results", 0)
            clusters = results.get("cluster", [])

            patents: List[PatentSearchResult] = []
            if clusters and len(clusters) > 0:
                patent_results = clusters[0].get("result", [])
                for item in patent_results[:max_results]:
                    patent_data = item.get("patent", {})

                    # Patent legal status is nested under family_metadata;
                    # fall back to UNKNOWN when the structure is absent.
                    family_meta = patent_data.get("family_metadata", {})
                    aggregated = family_meta.get("aggregated", {})
                    country_status = aggregated.get("country_status", [])
                    status = "UNKNOWN"
                    if country_status and len(country_status) > 0:
                        best_stage = country_status[0].get("best_patent_stage", {})
                        status = best_stage.get("state", "UNKNOWN")

                    # Build PDF URL if available
                    pdf_path = patent_data.get("pdf", "")
                    pdf_url = f"{self.GOOGLE_PATENTS_PDF_BASE}{pdf_path}" if pdf_path else None

                    # Build thumbnail URL
                    thumbnail = patent_data.get("thumbnail", "")
                    thumbnail_url = f"{self.GOOGLE_PATENTS_PDF_BASE}{thumbnail}" if thumbnail else None

                    patent = PatentSearchResult(
                        publication_number=patent_data.get("publication_number", ""),
                        title=self._clean_html(patent_data.get("title", "")),
                        snippet=self._clean_html(patent_data.get("snippet", "")),
                        publication_date=patent_data.get("publication_date"),
                        assignee=patent_data.get("assignee"),
                        inventor=patent_data.get("inventor"),
                        status=status,
                        pdf_url=pdf_url,
                        thumbnail_url=thumbnail_url,
                    )
                    patents.append(patent)

            logger.info(f"Found {total_num} total patents, returning {len(patents)}")
            return {
                "total_results": total_num,
                "patents": [p.to_dict() for p in patents],
            }

        except httpx.HTTPError as e:
            logger.error(f"HTTP error searching patents: {e}")
            return {
                "total_results": 0,
                "patents": [],
                "error": str(e)
            }
        except Exception as e:
            logger.error(f"Error searching patents: {e}")
            return {
                "total_results": 0,
                "patents": [],
                "error": str(e)
            }

    def _clean_html(self, text: str) -> str:
        """Decode HTML entities in API text and trim surrounding whitespace.

        Fix: the previous chained .replace() implementation contained a
        no-op replace("&", "&"), so "&amp;" was never decoded, and decoding
        "&" before the longer entities could double-decode inputs such as
        "&amp;lt;". html.unescape handles all named and numeric entities
        in a single correct pass.
        """
        if not text:
            return ""
        # Normalize the unicode ellipsis Google inserts into truncated snippets.
        text = text.replace("…", "...")
        return html.unescape(text).strip()
# Singleton instance
# Shared across the app; its HTTP client is closed via close() at shutdown.
patent_search_service = PatentSearchService()

View File

@@ -1,17 +1,24 @@
import { useState, useRef, useCallback, useEffect } from 'react';
import { ConfigProvider, Layout, theme, Typography, Space, Tabs, Slider, Radio } from 'antd';
import { ApartmentOutlined, ThunderboltOutlined, FilterOutlined } from '@ant-design/icons';
import { useState, useRef, useCallback, useEffect, useMemo } from 'react';
import { ConfigProvider, Layout, theme, Typography, Space, Tabs, Slider, Radio, Switch, Segmented } from 'antd';
import { ApartmentOutlined, ThunderboltOutlined, FilterOutlined, SwapOutlined, FileSearchOutlined, GlobalOutlined } from '@ant-design/icons';
import { ThemeToggle } from './components/ThemeToggle';
import { InputPanel } from './components/InputPanel';
import { TransformationInputPanel } from './components/TransformationInputPanel';
import { MindmapPanel } from './components/MindmapPanel';
import { TransformationPanel } from './components/TransformationPanel';
import { DeduplicationPanel } from './components/DeduplicationPanel';
import { PatentSearchPanel } from './components/PatentSearchPanel';
import { DualPathInputPanel } from './components/DualPathInputPanel';
import { DualPathMindmapPanel } from './components/DualPathMindmapPanel';
import { CrossoverPanel } from './components/CrossoverPanel';
import { useAttribute } from './hooks/useAttribute';
import { useDualPathAttribute } from './hooks/useDualPathAttribute';
import { getModels } from './services/api';
import { crossoverPairsToDAGs, type CrossoverDAGResult } from './utils/crossoverToDAG';
import { DualTransformationPanel } from './components/DualTransformationPanel';
import type { MindmapDAGRef } from './components/MindmapDAG';
import type { TransformationDAGRef } from './components/TransformationDAG';
import type { CategoryMode, ExpertSource, ExpertTransformationDAGResult, DeduplicationMethod } from './types';
import type { CategoryMode, ExpertSource, ExpertTransformationDAGResult, DeduplicationMethod, ExpertMode, CrossoverPair, PromptLanguage } from './types';
const { Header, Sider, Content } = Layout;
const { Title } = Typography;
@@ -24,7 +31,15 @@ interface VisualSettings {
function App() {
const [isDark, setIsDark] = useState(true);
const [activeTab, setActiveTab] = useState<string>('attribute');
const [dualPathMode, setDualPathMode] = useState(false);
const [promptLanguage, setPromptLanguage] = useState<PromptLanguage>('zh');
// Single path hook
const { loading, progress, error, currentResult, history, analyze, loadFromHistory } = useAttribute();
// Dual path hook
const dualPath = useDualPathAttribute();
const [visualSettings, setVisualSettings] = useState<VisualSettings>({
nodeSpacing: 32,
fontSize: 14,
@@ -32,6 +47,21 @@ function App() {
const mindmapRef = useRef<MindmapDAGRef>(null);
const transformationRef = useRef<TransformationDAGRef>(null);
// Dual path expert mode
const [expertMode, setExpertMode] = useState<ExpertMode>('shared');
const [selectedCrossoverPairs, setSelectedCrossoverPairs] = useState<CrossoverPair[]>([]);
// Convert selected crossover pairs to two separate DAGs for dual transformation
const crossoverDAGs = useMemo((): CrossoverDAGResult | null => {
if (selectedCrossoverPairs.length === 0) return null;
if (!dualPath.pathA.result || !dualPath.pathB.result) return null;
return crossoverPairsToDAGs(
selectedCrossoverPairs,
dualPath.pathA.result,
dualPath.pathB.result
);
}, [selectedCrossoverPairs, dualPath.pathA.result, dualPath.pathB.result]);
// Transformation Agent settings
const [transformModel, setTransformModel] = useState<string>('');
const [transformTemperature, setTransformTemperature] = useState<number>(0.95);
@@ -83,9 +113,10 @@ function App() {
chainCount?: number,
categoryMode?: CategoryMode,
customCategories?: string[],
suggestedCategoryCount?: number
suggestedCategoryCount?: number,
lang?: PromptLanguage
) => {
await analyze(query, model, temperature, chainCount, categoryMode, customCategories, suggestedCategoryCount);
await analyze(query, model, temperature, chainCount, categoryMode, customCategories, suggestedCategoryCount, lang || promptLanguage);
};
const handleResetView = useCallback(() => {
@@ -96,6 +127,30 @@ function App() {
setShouldStartTransform(true);
}, []);
// Dual path analysis handler
const handleDualPathAnalyze = useCallback(async (
queryA: string,
queryB: string,
options?: {
model?: string;
temperature?: number;
chainCount?: number;
categoryMode?: CategoryMode;
customCategories?: string[];
suggestedCategoryCount?: number;
lang?: PromptLanguage;
}
) => {
await dualPath.analyzeParallel(queryA, queryB, { ...options, lang: options?.lang || promptLanguage });
}, [dualPath, promptLanguage]);
// Handle mode switch
const handleModeSwitch = useCallback((checked: boolean) => {
setDualPathMode(checked);
// Reset to attribute tab when switching modes
setActiveTab('attribute');
}, []);
return (
<ConfigProvider
theme={{
@@ -140,7 +195,31 @@ function App() {
Novelty Seeking
</Title>
</Space>
<Space align="center" size="middle">
<Space size="small">
<Typography.Text type="secondary">Single</Typography.Text>
<Switch
checked={dualPathMode}
onChange={handleModeSwitch}
checkedChildren={<SwapOutlined />}
unCheckedChildren={<ApartmentOutlined />}
/>
<Typography.Text type="secondary">Dual</Typography.Text>
</Space>
<Space size="small">
<GlobalOutlined style={{ color: isDark ? '#177ddc' : '#1890ff' }} />
<Segmented
size="small"
value={promptLanguage}
onChange={(value) => setPromptLanguage(value as PromptLanguage)}
options={[
{ label: '中文', value: 'zh' },
{ label: 'EN', value: 'en' },
]}
/>
</Space>
<ThemeToggle isDark={isDark} onToggle={setIsDark} />
</Space>
</Header>
<Layout>
<Content
@@ -155,7 +234,98 @@ function App() {
onChange={setActiveTab}
style={{ height: '100%' }}
tabBarStyle={{ marginBottom: 8 }}
items={[
items={dualPathMode ? [
// ===== Dual Path Mode Tabs =====
{
key: 'attribute',
label: (
<span>
<SwapOutlined style={{ marginRight: 8 }} />
Dual Path Attribute
</span>
),
children: (
<div style={{ height: 'calc(100vh - 140px)' }}>
<DualPathMindmapPanel
pathA={dualPath.pathA}
pathB={dualPath.pathB}
isDark={isDark}
visualSettings={visualSettings}
/>
</div>
),
},
{
key: 'crossover',
label: (
<span>
<SwapOutlined style={{ marginRight: 8 }} />
Crossover
</span>
),
children: (
<div style={{ height: 'calc(100vh - 140px)', padding: 16 }}>
<CrossoverPanel
pathAResult={dualPath.pathA.result}
pathBResult={dualPath.pathB.result}
isDark={isDark}
expertMode={expertMode}
onExpertModeChange={setExpertMode}
onCrossoverReady={setSelectedCrossoverPairs}
/>
</div>
),
},
{
key: 'transformation',
label: (
<span>
<ThunderboltOutlined style={{ marginRight: 8 }} />
Transformation Agent
{crossoverDAGs && (
<span style={{ marginLeft: 4, fontSize: 10, opacity: 0.7 }}>
(A:{crossoverDAGs.pathA.nodes.length} / B:{crossoverDAGs.pathB.nodes.length})
</span>
)}
</span>
),
children: (
<div style={{ height: 'calc(100vh - 140px)' }}>
<DualTransformationPanel
crossoverDAGA={crossoverDAGs?.pathA ?? null}
crossoverDAGB={crossoverDAGs?.pathB ?? null}
isDark={isDark}
model={transformModel}
temperature={transformTemperature}
expertConfig={expertConfig}
expertSource={expertSource}
expertLanguage={expertLanguage}
lang={promptLanguage}
shouldStartTransform={shouldStartTransform}
onTransformComplete={() => setShouldStartTransform(false)}
onLoadingChange={setTransformLoading}
/>
</div>
),
},
{
key: 'patent',
label: (
<span>
<FileSearchOutlined style={{ marginRight: 8 }} />
Patent Search
</span>
),
children: (
<div style={{ height: 'calc(100vh - 140px)' }}>
<PatentSearchPanel
isDark={isDark}
/>
</div>
),
},
] : [
// ===== Single Path Mode Tabs =====
{
key: 'attribute',
label: (
@@ -196,6 +366,7 @@ function App() {
expertConfig={expertConfig}
expertSource={expertSource}
expertLanguage={expertLanguage}
lang={promptLanguage}
shouldStartTransform={shouldStartTransform}
onTransformComplete={() => setShouldStartTransform(false)}
onLoadingChange={setTransformLoading}
@@ -221,6 +392,24 @@ function App() {
onThresholdChange={setDeduplicationThreshold}
method={deduplicationMethod}
onMethodChange={setDeduplicationMethod}
lang={promptLanguage}
/>
</div>
),
},
{
key: 'patent',
label: (
<span>
<FileSearchOutlined style={{ marginRight: 8 }} />
Patent Search
</span>
),
children: (
<div style={{ height: 'calc(100vh - 140px)' }}>
<PatentSearchPanel
descriptions={transformationResult?.results.flatMap(r => r.descriptions)}
isDark={isDark}
/>
</div>
),
@@ -236,24 +425,54 @@ function App() {
overflow: 'auto',
}}
>
{activeTab === 'attribute' && (
{activeTab === 'attribute' && !dualPathMode && (
<InputPanel
loading={loading}
progress={progress}
history={history}
currentResult={currentResult}
onAnalyze={handleAnalyze}
onLoadHistory={loadFromHistory}
onLoadHistory={(item, lang) => loadFromHistory(item, lang || promptLanguage)}
onResetView={handleResetView}
visualSettings={visualSettings}
onVisualSettingsChange={setVisualSettings}
lang={promptLanguage}
/>
)}
{activeTab === 'attribute' && dualPathMode && (
<DualPathInputPanel
onAnalyze={handleDualPathAnalyze}
loadingA={dualPath.pathA.loading}
loadingB={dualPath.pathB.loading}
progressA={dualPath.pathA.progress}
progressB={dualPath.pathB.progress}
availableModels={availableModels}
lang={promptLanguage}
/>
)}
{activeTab === 'crossover' && dualPathMode && (
<div style={{ padding: 16 }}>
<Typography.Title level={5} style={{ marginBottom: 16 }}>
<SwapOutlined style={{ marginRight: 8 }} />
Crossover Settings
</Typography.Title>
<Typography.Text type="secondary">
Select attribute pairs in the main panel to create crossover combinations.
{selectedCrossoverPairs.length > 0 && (
<div style={{ marginTop: 8 }}>
<Typography.Text strong>
{selectedCrossoverPairs.length} pairs selected
</Typography.Text>
</div>
)}
</Typography.Text>
</div>
)}
{activeTab === 'transformation' && (
<TransformationInputPanel
onTransform={handleTransform}
loading={transformLoading}
hasData={!!currentResult}
hasData={dualPathMode ? !!crossoverDAGs : !!currentResult}
isDark={isDark}
model={transformModel}
temperature={transformTemperature}

View File

@@ -0,0 +1,298 @@
import { useEffect, useState } from 'react';
import {
Empty,
Card,
Button,
Statistic,
Row,
Col,
Typography,
Space,
Badge,
Collapse,
Checkbox,
Radio,
} from 'antd';
import {
SwapOutlined,
CheckCircleOutlined,
ReloadOutlined,
UnorderedListOutlined,
TableOutlined,
} from '@ant-design/icons';
import type { AttributeDAG, CrossoverPair, ExpertMode } from '../types';
import { useAttributeCrossover } from '../hooks/useAttributeCrossover';
import { CrossoverCard } from './crossover/CrossoverCard';
import { CrossoverMatrix } from './crossover/CrossoverMatrix';
import { CrossoverPreview } from './crossover/CrossoverPreview';
const { Text } = Typography;

/** Props for the CrossoverPanel component. */
interface CrossoverPanelProps {
  /** Attribute DAG produced by Path A analysis; null until analyzed. */
  pathAResult: AttributeDAG | null;
  /** Attribute DAG produced by Path B analysis; null until analyzed. */
  pathBResult: AttributeDAG | null;
  isDark: boolean;
  /** Expert-team mode ('shared' | 'independent'); state is owned by the parent. */
  expertMode: ExpertMode;
  onExpertModeChange: (mode: ExpertMode) => void;
  /** Called with the currently selected pairs whenever the selection changes. */
  onCrossoverReady?: (selectedPairs: CrossoverPair[]) => void;
}

// Display mode for the pair browser: grouped collapsible list or A×B matrix.
type ViewMode = 'list' | 'matrix';
/**
 * Panel for browsing and selecting attribute crossover pairs between the
 * Path A and Path B analysis results.
 *
 * Pairs are (re)generated via useAttributeCrossover whenever both DAGs are
 * available, and the current selection is pushed up to the parent through
 * onCrossoverReady. Pairs can be viewed as a grouped list or as a matrix.
 */
export function CrossoverPanel({
  pathAResult,
  pathBResult,
  isDark,
  expertMode,
  onExpertModeChange,
  onCrossoverReady,
}: CrossoverPanelProps) {
  const [viewMode, setViewMode] = useState<ViewMode>('list');

  const {
    pairs,
    selectedPairs,
    pairsByType,
    crossTypeStats,
    applyPairs,
    togglePairSelection,
    selectPairsByType,
    selectAll,
    clearPairs,
  } = useAttributeCrossover();

  // Generate pairs when both results are available
  useEffect(() => {
    if (pathAResult && pathBResult) {
      applyPairs(pathAResult, pathBResult);
    } else {
      clearPairs();
    }
  }, [pathAResult, pathBResult, applyPairs, clearPairs]);

  // Notify parent when selection changes
  useEffect(() => {
    onCrossoverReady?.(selectedPairs);
  }, [selectedPairs, onCrossoverReady]);

  // Render when no data: explain which path(s) still need analysis
  if (!pathAResult || !pathBResult) {
    return (
      <div style={{
        display: 'flex',
        justifyContent: 'center',
        alignItems: 'center',
        height: '100%',
      }}>
        <Empty
          description={
            <Space direction="vertical" align="center">
              <Text>Complete both Path A and Path B analysis first</Text>
              <Text type="secondary">
                {!pathAResult && !pathBResult
                  ? 'Neither path has been analyzed'
                  : !pathAResult
                  ? 'Path A has not been analyzed'
                  : 'Path B has not been analyzed'}
              </Text>
            </Space>
          }
        />
      </div>
    );
  }

  // Generate cross type labels dynamically from the "same-<cat>" /
  // "cross-<catA>-<catB>" id convention; unknown ids fall through as-is.
  const getCrossTypeLabel = (crossType: string): string => {
    if (crossType.startsWith('same-')) {
      const category = crossType.replace('same-', '');
      return `Same Category: ${category}`;
    }
    if (crossType.startsWith('cross-')) {
      const parts = crossType.replace('cross-', '').split('-');
      if (parts.length >= 2) {
        return `Cross: ${parts[0]} × ${parts.slice(1).join('-')}`;
      }
    }
    return crossType;
  };

  // List view: one collapsible section per cross type, each with a
  // tri-state select-all checkbox and a responsive grid of pair cards.
  const renderListView = () => {
    const crossTypes = Object.keys(pairsByType);

    if (crossTypes.length === 0) {
      return <Empty description="No crossover pairs generated" />;
    }

    const collapseItems = crossTypes.map(type => {
      const typePairs = pairsByType[type];
      const stats = crossTypeStats[type];
      const label = getCrossTypeLabel(type);

      return {
        key: type,
        label: (
          <div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
            <Checkbox
              checked={stats.selected === stats.total}
              indeterminate={stats.selected > 0 && stats.selected < stats.total}
              // stopPropagation keeps a checkbox click from toggling the panel
              onClick={(e) => e.stopPropagation()}
              onChange={(e) => selectPairsByType(type, e.target.checked)}
            />
            <Text>{label}</Text>
            <Badge
              count={`${stats.selected}/${stats.total}`}
              style={{
                backgroundColor: stats.selected > 0 ? '#52c41a' : '#d9d9d9',
              }}
            />
          </div>
        ),
        children: (
          <div style={{
            display: 'grid',
            gridTemplateColumns: 'repeat(auto-fill, minmax(280px, 1fr))',
            gap: 8,
          }}>
            {typePairs.map(pair => (
              <CrossoverCard
                key={pair.id}
                pair={pair}
                onToggle={togglePairSelection}
                isDark={isDark}
              />
            ))}
          </div>
        ),
      };
    });

    return (
      <Collapse
        items={collapseItems}
        // Same-category sections open by default
        defaultActiveKey={crossTypes.filter(t => t.startsWith('same-'))}
      />
    );
  };

  // Matrix view: full A×B grid delegated to CrossoverMatrix
  const renderMatrixView = () => {
    return (
      <CrossoverMatrix
        dagA={pathAResult}
        dagB={pathBResult}
        pairs={pairs}
        onTogglePair={togglePairSelection}
        isDark={isDark}
      />
    );
  };

  return (
    <div style={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
      {/* Statistics Header */}
      <Card size="small" style={{ marginBottom: 16 }}>
        <Row gutter={16}>
          <Col span={6}>
            <Statistic
              title="Total Pairs"
              value={pairs.length}
              prefix={<SwapOutlined />}
            />
          </Col>
          <Col span={6}>
            <Statistic
              title="Selected"
              value={selectedPairs.length}
              prefix={<CheckCircleOutlined />}
              valueStyle={{ color: '#52c41a' }}
            />
          </Col>
          <Col span={6}>
            <Statistic
              title="Path A Attrs"
              value={pathAResult.nodes.length}
            />
          </Col>
          <Col span={6}>
            <Statistic
              title="Path B Attrs"
              value={pathBResult.nodes.length}
            />
          </Col>
        </Row>
      </Card>

      {/* Selection Preview */}
      <CrossoverPreview
        selectedPairs={selectedPairs}
        dagA={pathAResult}
        dagB={pathBResult}
        isDark={isDark}
      />

      {/* Expert Mode Selection */}
      <Card size="small" style={{ marginBottom: 16 }}>
        <Space direction="vertical" style={{ width: '100%' }}>
          <Text strong>Expert Team Mode</Text>
          <Radio.Group
            value={expertMode}
            onChange={(e) => onExpertModeChange(e.target.value)}
            buttonStyle="solid"
          >
            <Radio.Button value="shared">
              Shared Experts
            </Radio.Button>
            <Radio.Button value="independent">
              Independent Experts
            </Radio.Button>
          </Radio.Group>
          <Text type="secondary" style={{ fontSize: 12 }}>
            {expertMode === 'shared'
              ? 'Both paths use the same expert team for crossover transformation'
              : 'Each path uses its own expert team, combined for crossover'}
          </Text>
        </Space>
      </Card>

      {/* Actions: bulk selection, regeneration, and view-mode toggle */}
      <div style={{ marginBottom: 16, display: 'flex', gap: 8 }}>
        <Button
          icon={<CheckCircleOutlined />}
          onClick={() => selectAll(true)}
        >
          Select All
        </Button>
        <Button
          onClick={() => selectAll(false)}
        >
          Deselect All
        </Button>
        <Button
          icon={<ReloadOutlined />}
          onClick={() => applyPairs(pathAResult, pathBResult)}
        >
          Regenerate
        </Button>
        <div style={{ flex: 1 }} />
        <Radio.Group
          value={viewMode}
          onChange={(e) => setViewMode(e.target.value)}
          buttonStyle="solid"
          size="small"
        >
          <Radio.Button value="list">
            <UnorderedListOutlined /> List
          </Radio.Button>
          <Radio.Button value="matrix">
            <TableOutlined /> Matrix
          </Radio.Button>
        </Radio.Group>
      </div>

      {/* Content */}
      <div style={{ flex: 1, overflow: 'auto' }}>
        {viewMode === 'list' ? renderListView() : renderMatrixView()}
      </div>
    </div>
  );
}

View File

@@ -26,6 +26,7 @@ import type {
ExpertTransformationDAGResult,
ExpertTransformationDescription,
DeduplicationMethod,
PromptLanguage,
} from '../types';
const { Title, Text } = Typography;
@@ -37,6 +38,7 @@ interface DeduplicationPanelProps {
onThresholdChange: (value: number) => void;
method: DeduplicationMethod;
onMethodChange?: (method: DeduplicationMethod) => void; // Optional, handled in App.tsx sidebar
lang?: PromptLanguage;
}
/**
@@ -48,6 +50,7 @@ export const DeduplicationPanel: React.FC<DeduplicationPanelProps> = ({
threshold,
onThresholdChange,
method,
lang = 'zh',
// onMethodChange is handled in App.tsx sidebar
}) => {
const { loading, result, error, progress, deduplicate, clearResult } = useDeduplication();
@@ -70,7 +73,7 @@ export const DeduplicationPanel: React.FC<DeduplicationPanelProps> = ({
const handleDeduplicate = () => {
if (allDescriptions.length > 0) {
deduplicate(allDescriptions, threshold, method);
deduplicate(allDescriptions, threshold, method, lang);
}
};

View File

@@ -0,0 +1,312 @@
import { useState, useEffect } from 'react';
import {
Input,
Button,
Select,
Typography,
Space,
message,
Slider,
Collapse,
Progress,
Card,
Alert,
Tag,
Divider,
} from 'antd';
import {
SearchOutlined,
LoadingOutlined,
SwapOutlined,
} from '@ant-design/icons';
import type { CategoryMode, DAGProgress, PromptLanguage } from '../types';
import { getModels } from '../services/api';
import { CategorySelector } from './CategorySelector';
const { TextArea } = Input;
const { Text } = Typography;
/** Props for the dual-path analysis input panel. */
interface DualPathInputPanelProps {
  /** Starts analysis of both paths; the options object applies to both. */
  onAnalyze: (queryA: string, queryB: string, options?: {
    model?: string;
    temperature?: number;
    chainCount?: number;
    categoryMode?: CategoryMode;
    customCategories?: string[];
    suggestedCategoryCount?: number;
    lang?: PromptLanguage;
  }) => Promise<void>;
  loadingA: boolean;
  loadingB: boolean;
  progressA: DAGProgress;
  progressB: DAGProgress;
  /** Optional pre-fetched model list; when absent the panel fetches its own. */
  availableModels?: string[];
  /** Prompt/UI language; defaults to 'zh'. */
  lang?: PromptLanguage;
}
export function DualPathInputPanel({
onAnalyze,
loadingA,
loadingB,
progressA,
progressB,
availableModels: propModels,
lang = 'zh',
}: DualPathInputPanelProps) {
const [queryA, setQueryA] = useState('');
const [queryB, setQueryB] = useState('');
const [models, setModels] = useState<string[]>(propModels || []);
const [selectedModel, setSelectedModel] = useState<string | undefined>();
const [loadingModels, setLoadingModels] = useState(false);
const [temperature, setTemperature] = useState(0.7);
const [chainCount, setChainCount] = useState(5);
// Category settings
const [categoryMode, setCategoryMode] = useState<CategoryMode>('dynamic_auto' as CategoryMode);
const [customCategories, setCustomCategories] = useState<string[]>([]);
const [suggestedCategoryCount, setSuggestedCategoryCount] = useState(3);
const isLoading = loadingA || loadingB;
useEffect(() => {
if (propModels && propModels.length > 0) {
setModels(propModels);
if (!selectedModel) {
const defaultModel = propModels.find((m) => m.includes('qwen3')) || propModels[0];
setSelectedModel(defaultModel);
}
return;
}
async function fetchModels() {
setLoadingModels(true);
try {
const response = await getModels();
setModels(response.models);
if (response.models.length > 0 && !selectedModel) {
const defaultModel = response.models.find((m) => m.includes('qwen3')) || response.models[0];
setSelectedModel(defaultModel);
}
} catch {
message.error('Failed to fetch models');
} finally {
setLoadingModels(false);
}
}
fetchModels();
}, [propModels]);
const handleAnalyze = async () => {
if (!queryA.trim() || !queryB.trim()) {
message.warning(lang === 'zh' ? '請輸入兩個路徑的查詢內容' : 'Please enter queries for both paths');
return;
}
try {
await onAnalyze(queryA.trim(), queryB.trim(), {
model: selectedModel,
temperature,
chainCount,
categoryMode,
customCategories: customCategories.length > 0 ? customCategories : undefined,
suggestedCategoryCount,
lang,
});
} catch {
message.error(lang === 'zh' ? '分析失敗' : 'Analysis failed');
}
};
const handleSwapQueries = () => {
const temp = queryA;
setQueryA(queryB);
setQueryB(temp);
};
const renderProgressIndicator = (label: string, progress: DAGProgress, loading: boolean) => {
if (progress.step === 'idle' && !loading) return null;
if (progress.step === 'done') return null;
const percent = progress.step === 'step0'
? 15
: progress.step === 'step1'
? 50
: progress.step === 'relationships'
? 85
: 100;
return (
<div style={{ marginTop: 8 }}>
<Text type="secondary" style={{ fontSize: 12 }}>{label}: {progress.message}</Text>
<Progress
percent={Math.round(percent)}
size="small"
status={progress.step === 'error' ? 'exception' : 'active'}
strokeColor={{ from: '#108ee9', to: '#87d068' }}
/>
</div>
);
};
  // Panels for the settings <Collapse>: category configuration and LLM
  // sampling parameters. Both are disabled while an analysis is in flight.
  const collapseItems = [
    {
      key: 'categories',
      label: 'Category Settings',
      children: (
        <CategorySelector
          mode={categoryMode}
          onModeChange={setCategoryMode}
          customCategories={customCategories}
          onCustomCategoriesChange={setCustomCategories}
          suggestedCount={suggestedCategoryCount}
          onSuggestedCountChange={setSuggestedCategoryCount}
          disabled={isLoading}
        />
      ),
    },
    {
      key: 'llm',
      label: 'LLM Parameters',
      children: (
        <Space direction="vertical" style={{ width: '100%' }} size="middle">
          {/* Sampling temperature: 0 = deterministic, 1 = most random */}
          <div>
            <Text type="secondary" style={{ fontSize: 12 }}>Temperature: {temperature}</Text>
            <Slider
              min={0}
              max={1}
              step={0.1}
              value={temperature}
              onChange={setTemperature}
              marks={{ 0: '0', 0.5: '0.5', 1: '1' }}
              disabled={isLoading}
            />
          </div>
          {/* Number of causal chains to request per path */}
          <div>
            <Text type="secondary" style={{ fontSize: 12 }}>Chain Count: {chainCount}</Text>
            <Slider
              min={1}
              max={10}
              step={1}
              value={chainCount}
              onChange={setChainCount}
              marks={{ 1: '1', 5: '5', 10: '10' }}
              disabled={isLoading}
            />
          </div>
        </Space>
      ),
    },
  ];
  // Layout: dual-query input card on top, a combined progress alert while
  // both paths run, then the collapsible settings panels.
  return (
    <div style={{
      display: 'flex',
      flexDirection: 'column',
      height: '100%',
      padding: 16,
      gap: 16,
    }}>
      {/* Dual Path Input Card */}
      <Card
        size="small"
        title={<Text strong>Dual Path Analysis</Text>}
        styles={{ body: { padding: 12 } }}
      >
        <Space direction="vertical" style={{ width: '100%' }} size="middle">
          {/* Model Selection (shared by both paths) */}
          <Select
            style={{ width: '100%' }}
            value={selectedModel}
            onChange={setSelectedModel}
            loading={loadingModels}
            placeholder="Select a model"
            options={models.map((m) => ({ label: m, value: m }))}
            size="middle"
            disabled={isLoading}
          />
          {/* Path A Input */}
          <div>
            <Tag color="blue" style={{ marginBottom: 4 }}>Path A</Tag>
            <TextArea
              value={queryA}
              onChange={(e) => setQueryA(e.target.value)}
              placeholder="Enter first object (e.g., umbrella)"
              autoSize={{ minRows: 1, maxRows: 2 }}
              disabled={isLoading}
            />
            {renderProgressIndicator('Path A', progressA, loadingA)}
          </div>
          {/* Swap Button */}
          <div style={{ textAlign: 'center' }}>
            <Button
              icon={<SwapOutlined rotate={90} />}
              size="small"
              onClick={handleSwapQueries}
              disabled={isLoading}
            >
              Swap
            </Button>
          </div>
          {/* Path B Input */}
          <div>
            <Tag color="green" style={{ marginBottom: 4 }}>Path B</Tag>
            <TextArea
              value={queryB}
              onChange={(e) => setQueryB(e.target.value)}
              placeholder="Enter second object (e.g., bicycle)"
              autoSize={{ minRows: 1, maxRows: 2 }}
              disabled={isLoading}
            />
            {renderProgressIndicator('Path B', progressB, loadingB)}
          </div>
          {/* Analyze Button — disabled until both queries are non-empty */}
          <Button
            type="primary"
            icon={<SearchOutlined />}
            onClick={handleAnalyze}
            loading={isLoading}
            block
            size="large"
            disabled={!queryA.trim() || !queryB.trim()}
          >
            {isLoading ? 'Analyzing...' : 'Analyze Both'}
          </Button>
        </Space>
      </Card>
      {/* Combined Progress Alert — shows per-path messages while either runs */}
      {isLoading && (
        <Alert
          type="info"
          icon={<LoadingOutlined spin />}
          message="Parallel Analysis in Progress"
          description={
            <Space direction="vertical" style={{ width: '100%' }}>
              <div>
                <Tag color="blue">A</Tag> {progressA.message || 'Waiting...'}
              </div>
              <div>
                <Tag color="green">B</Tag> {progressB.message || 'Waiting...'}
              </div>
            </Space>
          }
          showIcon
        />
      )}
      <Divider style={{ margin: '4px 0' }} />
      {/* Settings Collapse */}
      <Collapse
        items={collapseItems}
        defaultActiveKey={[]}
        size="small"
        style={{ background: 'transparent' }}
      />
    </div>
  );
}

View File

@@ -0,0 +1,191 @@
import { Empty, Spin, Tag, Typography } from 'antd';
import type { PathState } from '../types';
import { MindmapDAG } from './MindmapDAG';
const { Text } = Typography;
/** Visual knobs passed straight through to the MindmapDAG renderer. */
interface VisualSettings {
  nodeSpacing: number;
  fontSize: number;
}
/** Props for the stacked dual-path mindmap view (Path A above Path B). */
interface DualPathMindmapPanelProps {
  pathA: PathState;
  pathB: PathState;
  isDark: boolean;
  visualSettings: VisualSettings;
}
/** Props for rendering a single path's mindmap plus its header strip. */
interface SinglePathViewProps {
  path: PathState;
  label: string;
  color: 'blue' | 'green';
  isDark: boolean;
  visualSettings: VisualSettings;
}
/**
 * One path's mindmap with a header strip (path tag, query, attribute count).
 * Body content follows the path lifecycle: spinner while loading, Empty on
 * error or when no result exists yet, otherwise the MindmapDAG.
 */
function SinglePathView({ path, label, color, isDark, visualSettings }: SinglePathViewProps) {
  const { result, loading, error, query, progress } = path;
  // Header with label
  const headerStyle: React.CSSProperties = {
    padding: '6px 12px',
    background: isDark ? '#1f1f1f' : '#fafafa',
    borderBottom: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
    display: 'flex',
    alignItems: 'center',
    gap: 8,
    minHeight: 36,
  };
  const contentStyle: React.CSSProperties = {
    flex: 1,
    position: 'relative',
    overflow: 'hidden',
  };
  // Selects the body for the current state; checked in priority order
  // (loading beats error beats missing result).
  const renderContent = () => {
    if (loading) {
      return (
        <div style={{
          display: 'flex',
          flexDirection: 'column',
          justifyContent: 'center',
          alignItems: 'center',
          height: '100%',
          gap: 8,
        }}>
          <Spin size="large" />
          <Text type="secondary">{progress.message || 'Analyzing...'}</Text>
        </div>
      );
    }
    if (error) {
      return (
        <div style={{
          display: 'flex',
          justifyContent: 'center',
          alignItems: 'center',
          height: '100%',
        }}>
          <Empty description={error} />
        </div>
      );
    }
    if (!result) {
      return (
        <div style={{
          display: 'flex',
          justifyContent: 'center',
          alignItems: 'center',
          height: '100%',
        }}>
          <Empty description={`Enter a query for ${label}`} />
        </div>
      );
    }
    return (
      <MindmapDAG
        data={result}
        isDark={isDark}
        visualSettings={visualSettings}
      />
    );
  };
  return (
    <div style={{ display: 'flex', flexDirection: 'column', height: '100%' }}>
      <div style={headerStyle}>
        <Tag color={color}>{label}</Tag>
        {/* Show the resolved query when a result exists, else the raw input */}
        {result && (
          <Text strong style={{ flex: 1 }}>
            {result.query}
          </Text>
        )}
        {!result && query && (
          <Text type="secondary" style={{ flex: 1 }}>
            {query}
          </Text>
        )}
        {result && (
          <Text type="secondary" style={{ fontSize: 12 }}>
            {result.nodes.length} attributes
          </Text>
        )}
      </div>
      <div style={contentStyle}>
        {renderContent()}
      </div>
    </div>
  );
}
/**
 * Stacked mindmap view showing Path A (top) and Path B (bottom), separated
 * by a thin visual divider. Each half renders independently via
 * SinglePathView so one path can load while the other displays results.
 */
export function DualPathMindmapPanel({
  pathA,
  pathB,
  isDark,
  visualSettings,
}: DualPathMindmapPanelProps) {
  const containerStyle: React.CSSProperties = {
    display: 'flex',
    flexDirection: 'column',
    height: '100%',
    gap: 2,
  };
  const pathContainerStyle: React.CSSProperties = {
    flex: 1,
    minHeight: 0, // allow flex children to shrink below content height
    borderRadius: 6,
    overflow: 'hidden',
    border: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
  };
  // NOTE(review): cursor 'row-resize' suggests a draggable splitter, but no
  // drag handler is attached — the divider is purely decorative here.
  const dividerStyle: React.CSSProperties = {
    height: 4,
    background: isDark ? '#303030' : '#f0f0f0',
    cursor: 'row-resize',
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
  };
  return (
    <div style={containerStyle}>
      {/* Path A - Top Half */}
      <div style={pathContainerStyle}>
        <SinglePathView
          path={pathA}
          label="Path A"
          color="blue"
          isDark={isDark}
          visualSettings={visualSettings}
        />
      </div>
      {/* Divider */}
      <div style={dividerStyle}>
        <div style={{
          width: 40,
          height: 3,
          borderRadius: 2,
          background: isDark ? '#505050' : '#d0d0d0',
        }} />
      </div>
      {/* Path B - Bottom Half */}
      <div style={pathContainerStyle}>
        <SinglePathView
          path={pathB}
          label="Path B"
          color="green"
          isDark={isDark}
          visualSettings={visualSettings}
        />
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,356 @@
import { useCallback, useEffect, useMemo, useState } from 'react';
import { Empty, Spin, Card, Space, Typography, Tag, Button, Progress } from 'antd';
import { ThunderboltOutlined, ReloadOutlined } from '@ant-design/icons';
import type { AttributeDAG, ExpertSource, ExpertTransformationDAGResult, PromptLanguage } from '../types';
import { TransformationDAG } from './TransformationDAG';
import { useExpertTransformation } from '../hooks/useExpertTransformation';
const { Text } = Typography;
/**
 * Props for the dual transformation view. Each path receives the *other*
 * path's crossover attributes; the transform is triggered externally via
 * `shouldStartTransform` and reported back through the callbacks.
 */
interface DualTransformationPanelProps {
  crossoverDAGA: AttributeDAG | null; // Path A with attributes from B
  crossoverDAGB: AttributeDAG | null; // Path B with attributes from A
  isDark: boolean;
  model?: string;
  temperature?: number;
  expertConfig: {
    expert_count: number;
    keywords_per_expert: number;
    custom_experts?: string[];
  };
  expertSource: ExpertSource;
  expertLanguage: 'en' | 'zh';
  lang?: PromptLanguage;
  shouldStartTransform: boolean;
  onTransformComplete: () => void;
  onLoadingChange: (loading: boolean) => void;
  onResultsChange?: (results: { pathA: ExpertTransformationDAGResult | null; pathB: ExpertTransformationDAGResult | null }) => void;
}
/** Props for a single path's expert-transformation sub-view. */
interface SinglePathTransformProps {
  label: string;
  color: 'blue' | 'green';
  attributeData: AttributeDAG | null;
  crossoverSource: string; // The query from which attributes were crossed over
  isDark: boolean;
  model?: string;
  temperature?: number;
  expertConfig: {
    expert_count: number;
    keywords_per_expert: number;
    custom_experts?: string[];
  };
  expertSource: ExpertSource;
  expertLanguage: 'en' | 'zh';
  lang?: PromptLanguage;
  shouldStart: boolean;
  onComplete: () => void;
  onResultChange: (result: ExpertTransformationDAGResult | null) => void;
}
/**
 * Runs the expert transformation for one path's crossover attributes and
 * renders the appropriate state: empty, loading (with progress), results
 * (TransformationDAG), or a ready preview listing the attributes to process.
 * The transform starts when `shouldStart` flips true and `onComplete` fires
 * once it finishes.
 */
function SinglePathTransform({
  label,
  color,
  attributeData,
  crossoverSource,
  isDark,
  model,
  temperature,
  expertConfig,
  expertSource,
  expertLanguage,
  lang = 'zh',
  shouldStart,
  onComplete,
  onResultChange,
}: SinglePathTransformProps) {
  const {
    loading,
    progress,
    results,
    transformAll,
    clearResults,
  } = useExpertTransformation({ model, temperature, expertSource, expertLanguage, lang });
  // Notify parent of results changes
  useEffect(() => {
    onResultChange(results);
  }, [results, onResultChange]);
  // Build transformation input: regroup the DAG nodes by category so the
  // hook receives { category -> attribute names }.
  const transformationInput = useMemo(() => {
    if (!attributeData) return null;
    const attributesByCategory: Record<string, string[]> = {};
    for (const node of attributeData.nodes) {
      if (!attributesByCategory[node.category]) {
        attributesByCategory[node.category] = [];
      }
      attributesByCategory[node.category].push(node.name);
    }
    return {
      query: attributeData.query,
      categories: attributeData.categories,
      attributesByCategory,
      expertConfig,
    };
  }, [attributeData, expertConfig]);
  // Handle transform trigger. Guards (`!loading && !results`) prevent
  // re-triggering while a run is active or after results already exist.
  useEffect(() => {
    if (shouldStart && transformationInput && !loading && !results) {
      transformAll(transformationInput).then(() => onComplete());
    }
  }, [shouldStart, transformationInput, loading, results, transformAll, onComplete]);
  // Progress percentage = processed categories / total categories.
  const progressPercent = useMemo(() => {
    if (!transformationInput || progress.step === 'idle') return 0;
    const totalCategories = transformationInput.categories.length;
    if (totalCategories === 0) return 0;
    const completed = progress.processedCategories.length;
    return Math.round((completed / totalCategories) * 100);
  }, [transformationInput, progress]);
  const headerStyle: React.CSSProperties = {
    padding: '8px 12px',
    background: isDark ? '#1f1f1f' : '#fafafa',
    borderBottom: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
    display: 'flex',
    alignItems: 'center',
    gap: 8,
  };
  // State 1: nothing to transform yet.
  if (!attributeData || attributeData.nodes.length === 0) {
    return (
      <div style={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
        <div style={headerStyle}>
          <Tag color={color}>{label}</Tag>
          <Text type="secondary">No crossover attributes selected</Text>
        </div>
        <div style={{ flex: 1, display: 'flex', justifyContent: 'center', alignItems: 'center' }}>
          <Empty description="Select crossover pairs first" />
        </div>
      </div>
    );
  }
  // State 2: transformation running.
  if (loading) {
    return (
      <div style={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
        <div style={headerStyle}>
          <Tag color={color}>{label}</Tag>
          <Text strong>{attributeData.query}</Text>
          <Text type="secondary"> receiving {attributeData.nodes.length} attributes</Text>
        </div>
        <div style={{ flex: 1, display: 'flex', flexDirection: 'column', justifyContent: 'center', alignItems: 'center', gap: 16 }}>
          <Spin size="large" />
          <Text>{progress.message}</Text>
          <Progress percent={progressPercent} style={{ width: 200 }} />
        </div>
      </div>
    );
  }
  // State 3: results available.
  if (results) {
    return (
      <div style={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
        <div style={headerStyle}>
          <Tag color={color}>{label}</Tag>
          <Text strong>{attributeData.query}</Text>
          <Text type="secondary">
            {results.results.reduce((sum, r) => sum + r.descriptions.length, 0)} descriptions
          </Text>
          <div style={{ flex: 1 }} />
          <Button size="small" icon={<ReloadOutlined />} onClick={clearResults}>
            Clear
          </Button>
        </div>
        <div style={{ flex: 1, overflow: 'hidden' }}>
          <TransformationDAG
            data={results}
            categories={attributeData.categories}
            isDark={isDark}
            crossoverSource={crossoverSource}
          />
        </div>
      </div>
    );
  }
  // Ready state - show attributes to transform
  return (
    <div style={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
      <div style={headerStyle}>
        <Tag color={color}>{label}</Tag>
        <Text strong>{attributeData.query}</Text>
        <Text type="secondary"> {attributeData.nodes.length} attributes to transform</Text>
      </div>
      <div style={{ flex: 1, padding: 16, overflow: 'auto' }}>
        <Card size="small" title="Crossover Attributes">
          <Space wrap>
            {/* Attributes carry the *source* path's color (opposite of this path). */}
            {attributeData.nodes.map(node => (
              <Tag key={node.id} color={color === 'blue' ? 'green' : 'blue'}>
                {node.name}
                <Text type="secondary" style={{ marginLeft: 4, fontSize: 10 }}>
                  ({node.category})
                </Text>
              </Tag>
            ))}
          </Space>
        </Card>
        <Text type="secondary" style={{ display: 'block', marginTop: 12, fontSize: 12 }}>
          These attributes will be transformed with "{attributeData.query}" as context.
        </Text>
      </div>
    </div>
  );
}
/**
 * Stacked dual transformation view: Path A (top) transforms attributes
 * crossed over from Path B, and vice versa. An external
 * `shouldStartTransform` flag arms both sub-views; completion is detected
 * when both triggers have cleared and at least one result exists.
 */
export function DualTransformationPanel({
  crossoverDAGA,
  crossoverDAGB,
  isDark,
  model,
  temperature,
  expertConfig,
  expertSource,
  expertLanguage,
  lang = 'zh',
  shouldStartTransform,
  onTransformComplete,
  onLoadingChange,
  onResultsChange,
}: DualTransformationPanelProps) {
  const [resultA, setResultA] = useState<ExpertTransformationDAGResult | null>(null);
  const [resultB, setResultB] = useState<ExpertTransformationDAGResult | null>(null);
  // Per-path "start" flags; each clears when its sub-view reports completion.
  const [triggerA, setTriggerA] = useState(false);
  const [triggerB, setTriggerB] = useState(false);
  // Handle external transform trigger
  useEffect(() => {
    if (shouldStartTransform) {
      setTriggerA(true);
      setTriggerB(true);
      onLoadingChange(true);
    }
  }, [shouldStartTransform, onLoadingChange]);
  // Notify parent of results
  useEffect(() => {
    onResultsChange?.({ pathA: resultA, pathB: resultB });
  }, [resultA, resultB, onResultsChange]);
  const handleCompleteA = useCallback(() => {
    setTriggerA(false);
  }, []);
  const handleCompleteB = useCallback(() => {
    setTriggerB(false);
  }, []);
  // When both are done, notify parent. The `(resultA || resultB)` guard
  // keeps this from firing in the initial idle state.
  useEffect(() => {
    if (!triggerA && !triggerB && (resultA || resultB)) {
      onLoadingChange(false);
      onTransformComplete();
    }
  }, [triggerA, triggerB, resultA, resultB, onLoadingChange, onTransformComplete]);
  const containerStyle: React.CSSProperties = {
    display: 'flex',
    flexDirection: 'column',
    height: '100%',
    gap: 2,
  };
  const pathContainerStyle: React.CSSProperties = {
    flex: 1,
    minHeight: 0, // allow flex children to shrink below content height
    borderRadius: 6,
    overflow: 'hidden',
    border: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
  };
  const dividerStyle: React.CSSProperties = {
    height: 4,
    background: isDark ? '#303030' : '#f0f0f0',
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
  };
  // Show empty state if no crossover data
  if (!crossoverDAGA && !crossoverDAGB) {
    return (
      <div style={{ height: '100%', display: 'flex', justifyContent: 'center', alignItems: 'center' }}>
        <Empty
          description={
            <Space direction="vertical" align="center">
              <ThunderboltOutlined style={{ fontSize: 32, color: isDark ? '#444' : '#ccc' }} />
              <Text>Select crossover pairs first</Text>
              <Text type="secondary">
                Go to the Crossover tab and select attribute pairs to transform
              </Text>
            </Space>
          }
        />
      </div>
    );
  }
  return (
    <div style={containerStyle}>
      {/* Path A - Top (receives attributes from Path B) */}
      <div style={pathContainerStyle}>
        <SinglePathTransform
          label="Path A"
          color="blue"
          attributeData={crossoverDAGA}
          crossoverSource={crossoverDAGB?.query || 'Path B'}
          isDark={isDark}
          model={model}
          temperature={temperature}
          expertConfig={expertConfig}
          expertSource={expertSource}
          expertLanguage={expertLanguage}
          lang={lang}
          shouldStart={triggerA}
          onComplete={handleCompleteA}
          onResultChange={setResultA}
        />
      </div>
      {/* Divider */}
      <div style={dividerStyle}>
        <div style={{
          width: 40,
          height: 3,
          borderRadius: 2,
          background: isDark ? '#505050' : '#d0d0d0',
        }} />
      </div>
      {/* Path B - Bottom (receives attributes from Path A) */}
      <div style={pathContainerStyle}>
        <SinglePathTransform
          label="Path B"
          color="green"
          attributeData={crossoverDAGB}
          crossoverSource={crossoverDAGA?.query || 'Path A'}
          isDark={isDark}
          model={model}
          temperature={temperature}
          expertConfig={expertConfig}
          expertSource={expertSource}
          expertLanguage={expertLanguage}
          lang={lang}
          shouldStart={triggerB}
          onComplete={handleCompleteB}
          onResultChange={setResultB}
        />
      </div>
    </div>
  );
}

View File

@@ -23,7 +23,7 @@ import {
FileTextOutlined,
CodeOutlined,
} from '@ant-design/icons';
import type { AttributeDAG, CategoryMode } from '../types';
import type { AttributeDAG, CategoryMode, PromptLanguage } from '../types';
import { getModels } from '../services/api';
import { CategorySelector } from './CategorySelector';
@@ -59,12 +59,14 @@ interface InputPanelProps {
chainCount?: number,
categoryMode?: CategoryMode,
customCategories?: string[],
suggestedCategoryCount?: number
suggestedCategoryCount?: number,
lang?: PromptLanguage
) => Promise<void>;
onLoadHistory: (item: DAGHistoryItem) => void;
onLoadHistory: (item: DAGHistoryItem, lang?: PromptLanguage) => void;
onResetView?: () => void;
visualSettings: VisualSettings;
onVisualSettingsChange: (settings: VisualSettings) => void;
lang?: PromptLanguage;
}
export function InputPanel({
@@ -77,6 +79,7 @@ export function InputPanel({
onResetView,
visualSettings,
onVisualSettingsChange,
lang = 'zh',
}: InputPanelProps) {
const [query, setQuery] = useState('');
const [models, setModels] = useState<string[]>([]);
@@ -111,7 +114,7 @@ export function InputPanel({
const handleAnalyze = async () => {
if (!query.trim()) {
message.warning('Please enter a query');
message.warning(lang === 'zh' ? '請輸入查詢內容' : 'Please enter a query');
return;
}
@@ -123,11 +126,12 @@ export function InputPanel({
chainCount,
categoryMode,
customCategories.length > 0 ? customCategories : undefined,
suggestedCategoryCount
suggestedCategoryCount,
lang
);
setQuery('');
} catch {
message.error('Analysis failed');
message.error(lang === 'zh' ? '分析失敗' : 'Analysis failed');
}
};
@@ -489,7 +493,7 @@ export function InputPanel({
marginBottom: 4,
transition: 'background 0.2s',
}}
onClick={() => onLoadHistory(item)}
onClick={() => onLoadHistory(item, lang)}
className="history-item"
>
<div style={{ flex: 1, minWidth: 0 }}>

View File

@@ -0,0 +1,344 @@
import { useState, useCallback } from 'react';
import {
Card,
Button,
Space,
Typography,
Empty,
Input,
Tag,
List,
Tooltip,
message,
} from 'antd';
import {
SearchOutlined,
LinkOutlined,
CopyOutlined,
DeleteOutlined,
GlobalOutlined,
} from '@ant-design/icons';
import type {
ExpertTransformationDescription,
} from '../types';
const { Text, Paragraph } = Typography;
const { TextArea } = Input;
/** Props for the patent-search panel; `descriptions` are optional LLM outputs. */
interface PatentSearchPanelProps {
  descriptions?: ExpertTransformationDescription[];
  isDark: boolean;
}
/** One entry in the user-built search list (custom query or picked description). */
interface SearchItem {
  id: string;
  query: string;
  searchUrl: string;
  expertName?: string;
  keyword?: string;
}
/** Build a Google Patents full-text search URL for the given query string. */
function generatePatentSearchUrl(query: string): string {
  return 'https://patents.google.com/?q=' + encodeURIComponent(query);
}
/** Build a Lens.org patent-list search URL for the given query string. */
function generateLensSearchUrl(query: string): string {
  return `https://www.lens.org/lens/search/patent/list?q=${encodeURIComponent(query)}`;
}
export function PatentSearchPanel({ descriptions, isDark }: PatentSearchPanelProps) {
const [customQuery, setCustomQuery] = useState('');
const [searchItems, setSearchItems] = useState<SearchItem[]>([]);
const [selectedDescriptions, setSelectedDescriptions] = useState<Set<number>>(new Set());
// Add custom query to search list
const handleAddCustomQuery = useCallback(() => {
if (!customQuery.trim()) return;
const newItem: SearchItem = {
id: `custom-${Date.now()}`,
query: customQuery.trim(),
searchUrl: generatePatentSearchUrl(customQuery.trim()),
};
setSearchItems(prev => [newItem, ...prev]);
setCustomQuery('');
message.success('Added to search list');
}, [customQuery]);
// Add selected descriptions to search list
const handleAddSelected = useCallback(() => {
if (!descriptions || selectedDescriptions.size === 0) return;
const newItems: SearchItem[] = Array.from(selectedDescriptions).map(idx => {
const desc = descriptions[idx];
return {
id: `desc-${idx}-${Date.now()}`,
query: desc.description,
searchUrl: generatePatentSearchUrl(desc.description),
expertName: desc.expert_name,
keyword: desc.keyword,
};
});
setSearchItems(prev => [...newItems, ...prev]);
setSelectedDescriptions(new Set());
message.success(`Added ${newItems.length} items to search list`);
}, [descriptions, selectedDescriptions]);
// Remove item from list
const handleRemoveItem = useCallback((id: string) => {
setSearchItems(prev => prev.filter(item => item.id !== id));
}, []);
// Copy URL to clipboard
const handleCopyUrl = useCallback((url: string) => {
navigator.clipboard.writeText(url);
message.success('URL copied to clipboard');
}, []);
// Toggle description selection
const toggleDescription = useCallback((index: number) => {
setSelectedDescriptions(prev => {
const next = new Set(prev);
if (next.has(index)) {
next.delete(index);
} else {
next.add(index);
}
return next;
});
}, []);
// Clear all
const handleClearAll = useCallback(() => {
setSearchItems([]);
}, []);
const containerStyle: React.CSSProperties = {
height: '100%',
display: 'flex',
flexDirection: 'column',
gap: 16,
padding: 16,
overflow: 'auto',
};
const cardStyle: React.CSSProperties = {
background: isDark ? '#1f1f1f' : '#fff',
};
return (
<div style={containerStyle}>
{/* Info banner */}
<Card size="small" style={cardStyle}>
<Space>
<GlobalOutlined style={{ color: '#1890ff' }} />
<Text>
Generate search links to check for similar patents on Google Patents or Lens.org
</Text>
</Space>
</Card>
{/* Custom search input */}
<Card size="small" title="Add Custom Search" style={cardStyle}>
<TextArea
placeholder="Enter a description to search for similar patents..."
value={customQuery}
onChange={e => setCustomQuery(e.target.value)}
autoSize={{ minRows: 2, maxRows: 4 }}
style={{ marginBottom: 8 }}
/>
<Button
type="primary"
icon={<SearchOutlined />}
onClick={handleAddCustomQuery}
disabled={!customQuery.trim()}
>
Add to Search List
</Button>
</Card>
{/* Description selection (if available) */}
{descriptions && descriptions.length > 0 && (
<Card
size="small"
title={`Generated Descriptions (${descriptions.length})`}
style={cardStyle}
extra={
<Button
type="primary"
size="small"
icon={<SearchOutlined />}
onClick={handleAddSelected}
disabled={selectedDescriptions.size === 0}
>
Add Selected ({selectedDescriptions.size})
</Button>
}
>
<div style={{ maxHeight: 200, overflow: 'auto' }}>
<Space direction="vertical" style={{ width: '100%' }}>
{descriptions.slice(0, 20).map((desc, idx) => (
<div
key={idx}
onClick={() => toggleDescription(idx)}
style={{
padding: 8,
borderRadius: 4,
cursor: 'pointer',
background: selectedDescriptions.has(idx)
? (isDark ? '#177ddc22' : '#1890ff11')
: (isDark ? '#141414' : '#fafafa'),
border: selectedDescriptions.has(idx)
? `1px solid ${isDark ? '#177ddc' : '#1890ff'}`
: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
}}
>
<Space size={4}>
<Tag color="blue" style={{ fontSize: 10 }}>{desc.expert_name}</Tag>
<Tag style={{ fontSize: 10 }}>{desc.keyword}</Tag>
</Space>
<Paragraph
ellipsis={{ rows: 2 }}
style={{ marginBottom: 0, marginTop: 4, fontSize: 12 }}
>
{desc.description}
</Paragraph>
</div>
))}
{descriptions.length > 20 && (
<Text type="secondary">
And {descriptions.length - 20} more descriptions...
</Text>
)}
</Space>
</div>
</Card>
)}
{/* Search list */}
{searchItems.length > 0 && (
<Card
size="small"
title={`Search List (${searchItems.length})`}
style={{ ...cardStyle, flex: 1, minHeight: 0, overflow: 'hidden', display: 'flex', flexDirection: 'column' }}
extra={
<Button size="small" danger onClick={handleClearAll}>
Clear All
</Button>
}
bodyStyle={{ flex: 1, overflow: 'auto', padding: 0 }}
>
<List
dataSource={searchItems}
renderItem={item => (
<List.Item
style={{
padding: '12px 16px',
borderBottom: `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
}}
actions={[
<Tooltip title="Open in Google Patents" key="google">
<Button
type="link"
icon={<LinkOutlined />}
href={item.searchUrl}
target="_blank"
>
Google
</Button>
</Tooltip>,
<Tooltip title="Open in Lens.org" key="lens">
<Button
type="link"
icon={<GlobalOutlined />}
href={generateLensSearchUrl(item.query)}
target="_blank"
>
Lens
</Button>
</Tooltip>,
<Tooltip title="Copy URL" key="copy">
<Button
type="text"
icon={<CopyOutlined />}
onClick={() => handleCopyUrl(item.searchUrl)}
/>
</Tooltip>,
<Tooltip title="Remove" key="remove">
<Button
type="text"
danger
icon={<DeleteOutlined />}
onClick={() => handleRemoveItem(item.id)}
/>
</Tooltip>,
]}
>
<List.Item.Meta
title={
<Space size={4}>
{item.expertName && (
<Tag color="blue" style={{ fontSize: 10 }}>{item.expertName}</Tag>
)}
{item.keyword && (
<Tag style={{ fontSize: 10 }}>{item.keyword}</Tag>
)}
</Space>
}
description={
<Paragraph
ellipsis={{ rows: 2 }}
style={{ marginBottom: 0, fontSize: 12 }}
>
{item.query}
</Paragraph>
}
/>
</List.Item>
)}
/>
</Card>
)}
{/* Empty state */}
{searchItems.length === 0 && (!descriptions || descriptions.length === 0) && (
<Card style={cardStyle}>
<Empty
description={
<Space direction="vertical">
<Text>Enter a description or run transformations first</Text>
<Text type="secondary">
Search links will open in Google Patents or Lens.org
</Text>
</Space>
}
/>
</Card>
)}
{/* Empty state with descriptions but no search items */}
{searchItems.length === 0 && descriptions && descriptions.length > 0 && (
<Card style={cardStyle}>
<Empty
image={Empty.PRESENTED_IMAGE_SIMPLE}
description={
<Space direction="vertical">
<Text>Select descriptions above to add to search list</Text>
<Text type="secondary">
Then click the links to search on Google Patents or Lens.org
</Text>
</Space>
}
/>
</Card>
)}
</div>
);
}

View File

@@ -23,6 +23,7 @@ interface TransformationDAGProps {
data: TransformationDAGResult | ExpertTransformationDAGResult;
categories: CategoryDefinition[];
isDark: boolean;
crossoverSource?: string; // If set, marks all attributes as crossed over from this source
}
export interface TransformationDAGRef {
@@ -30,7 +31,7 @@ export interface TransformationDAGRef {
}
const TransformationDAGInner = forwardRef<TransformationDAGRef, TransformationDAGProps>(
({ data, categories, isDark }, ref) => {
({ data, categories, isDark, crossoverSource }, ref) => {
const { setViewport } = useReactFlow();
// Check if data is ExpertTransformationDAGResult by checking for 'experts' property
@@ -46,7 +47,7 @@ const TransformationDAGInner = forwardRef<TransformationDAGRef, TransformationDA
const expertLayout = useExpertTransformationLayout(
isExpertTransformation ? (data as ExpertTransformationDAGResult) : null,
categories,
{ isDark, fontSize: 13 }
{ isDark, fontSize: 13, crossoverSource }
);
const { nodes, edges } = isExpertTransformation ? expertLayout : regularLayout;

View File

@@ -1,7 +1,7 @@
import { forwardRef, useMemo, useCallback, useEffect } from 'react';
import { Empty, Spin, Button, Progress, Card, Space, Typography, Tag } from 'antd';
import { ReloadOutlined } from '@ant-design/icons';
import type { AttributeDAG, ExpertTransformationInput, ExpertSource, ExpertTransformationDAGResult } from '../types';
import type { AttributeDAG, ExpertTransformationInput, ExpertSource, ExpertTransformationDAGResult, PromptLanguage } from '../types';
import { TransformationDAG } from './TransformationDAG';
import type { TransformationDAGRef } from './TransformationDAG';
import { useExpertTransformation } from '../hooks/useExpertTransformation';
@@ -20,6 +20,7 @@ interface TransformationPanelProps {
};
expertSource: ExpertSource;
expertLanguage: 'en' | 'zh';
lang?: PromptLanguage;
shouldStartTransform: boolean;
onTransformComplete: () => void;
onLoadingChange: (loading: boolean) => void;
@@ -27,14 +28,14 @@ interface TransformationPanelProps {
}
export const TransformationPanel = forwardRef<TransformationDAGRef, TransformationPanelProps>(
({ attributeData, isDark, model, temperature, expertConfig, expertSource, expertLanguage, shouldStartTransform, onTransformComplete, onLoadingChange, onResultsChange }, ref) => {
({ attributeData, isDark, model, temperature, expertConfig, expertSource, expertLanguage, lang = 'zh', shouldStartTransform, onTransformComplete, onLoadingChange, onResultsChange }, ref) => {
const {
loading,
progress,
results,
transformAll,
clearResults,
} = useExpertTransformation({ model, temperature, expertSource, expertLanguage });
} = useExpertTransformation({ model, temperature, expertSource, expertLanguage, lang });
// Notify parent of loading state changes
useEffect(() => {

View File

@@ -0,0 +1,76 @@
import { Card, Tag, Checkbox, Typography, Space } from 'antd';
import { SwapOutlined } from '@ant-design/icons';
import type { CrossoverPair } from '../../types';
const { Text } = Typography;
/** Props for a single selectable crossover-pair card. */
interface CrossoverCardProps {
  pair: CrossoverPair;
  onToggle: (pairId: string) => void;
  isDark: boolean;
}
/**
 * A selectable card visualizing one crossover pair: an attribute from the
 * source path crossed onto the target path. Clicking anywhere on the card
 * (or its checkbox) toggles selection.
 */
export function CrossoverCard({ pair, onToggle, isDark }: CrossoverCardProps) {
  // Path A renders blue, Path B green — consistent with the rest of the UI.
  const sourceColor = pair.sourcePathId === 'A' ? 'blue' : 'green';
  const targetColor = pair.targetPathId === 'A' ? 'blue' : 'green';
  return (
    <Card
      size="small"
      hoverable
      onClick={() => onToggle(pair.id)}
      style={{
        cursor: 'pointer',
        // FIX: the original ternary `isDark ? '#1890ff' : '#1890ff'` had
        // identical branches; selected border color is theme-independent.
        border: pair.selected
          ? '2px solid #1890ff'
          : `1px solid ${isDark ? '#303030' : '#f0f0f0'}`,
        background: pair.selected
          ? (isDark ? '#111d2c' : '#e6f4ff')
          : (isDark ? '#1f1f1f' : '#fff'),
      }}
      styles={{
        body: { padding: '8px 12px' },
      }}
    >
      <Space direction="vertical" size={4} style={{ width: '100%' }}>
        {/* Header with checkbox; stopPropagation keeps the card's own
            onClick from firing a second toggle on the same click. */}
        <div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
          <Checkbox
            checked={pair.selected}
            onChange={() => onToggle(pair.id)}
            onClick={(e) => e.stopPropagation()}
          />
          <Tag style={{ margin: 0, fontSize: 10 }}>{pair.crossType}</Tag>
        </div>
        {/* Crossover visualization: source -> target */}
        <div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
          {/* Source */}
          <div style={{ flex: 1, textAlign: 'right' }}>
            <Tag color={sourceColor} style={{ margin: 0 }}>
              {pair.sourcePathId}
            </Tag>
            <Text style={{ marginLeft: 4 }}>{pair.sourceNode.name}</Text>
            <Text type="secondary" style={{ fontSize: 10, display: 'block' }}>
              {pair.sourceNode.category}
            </Text>
          </div>
          {/* Cross icon */}
          <SwapOutlined style={{ color: isDark ? '#888' : '#999' }} />
          {/* Target */}
          <div style={{ flex: 1 }}>
            <Tag color={targetColor} style={{ margin: 0 }}>
              {pair.targetPathId}
            </Tag>
            <Text style={{ marginLeft: 4 }}>{pair.targetNode.name}</Text>
            <Text type="secondary" style={{ fontSize: 10, display: 'block' }}>
              {pair.targetNode.category}
            </Text>
          </div>
        </div>
      </Space>
    </Card>
  );
}

View File

@@ -0,0 +1,163 @@
import { useMemo } from 'react';
import { Table, Tag, Checkbox, Typography, Tooltip } from 'antd';
import type { AttributeDAG, CrossoverPair, DAGNode } from '../../types';
const { Text } = Typography;
/** Props for the category-by-category crossover selection matrix. */
interface CrossoverMatrixProps {
  dagA: AttributeDAG;
  dagB: AttributeDAG;
  pairs: CrossoverPair[];
  onTogglePair: (pairId: string) => void;
  isDark: boolean;
}
/** One matrix cell: a node from A, a node from B, and their pair (if any). */
interface MatrixCell {
  nodeA: DAGNode;
  nodeB: DAGNode;
  pair: CrossoverPair | null;
}
export function CrossoverMatrix({
dagA,
dagB,
pairs,
onTogglePair,
isDark,
}: CrossoverMatrixProps) {
// Group nodes by category
const groupByCategory = (nodes: DAGNode[]) => {
return nodes.reduce((acc, node) => {
if (!acc[node.category]) acc[node.category] = [];
acc[node.category].push(node);
return acc;
}, {} as Record<string, DAGNode[]>);
};
const nodesByCategoryA = useMemo(() => groupByCategory(dagA.nodes), [dagA.nodes]);
const nodesByCategoryB = useMemo(() => groupByCategory(dagB.nodes), [dagB.nodes]);
// Create a lookup map for pairs
const pairLookup = useMemo(() => {
const lookup = new Map<string, CrossoverPair>();
for (const pair of pairs) {
// Key format: sourceNodeId-targetNodeId
lookup.set(`${pair.sourceNode.id}-${pair.targetNode.id}`, pair);
}
return lookup;
}, [pairs]);
// Get all categories from both DAGs
const categoriesA = Object.keys(nodesByCategoryA);
const categoriesB = Object.keys(nodesByCategoryB);
// Render a matrix for each category pair
const renderCategoryMatrix = (categoryA: string, categoryB: string) => {
const nodesA = nodesByCategoryA[categoryA] || [];
const nodesB = nodesByCategoryB[categoryB] || [];
if (nodesA.length === 0 || nodesB.length === 0) return null;
// Create matrix data
const matrixData = nodesA.map((nodeA) => {
const row: Record<string, MatrixCell | DAGNode> = { nodeA };
for (const nodeB of nodesB) {
const pair = pairLookup.get(`${nodeA.id}-${nodeB.id}`) ||
pairLookup.get(`${nodeB.id}-${nodeA.id}`) ||
null;
row[nodeB.id] = { nodeA, nodeB, pair };
}
return row;
});
// Create columns
const columns = [
{
title: (
<div style={{ display: 'flex', alignItems: 'center', gap: 4 }}>
<Tag color="blue">A</Tag>
<Text strong>{categoryA}</Text>
</div>
),
dataIndex: 'nodeA',
key: 'nodeA',
width: 120,
fixed: 'left' as const,
render: (node: DAGNode) => (
<Text ellipsis style={{ maxWidth: 100 }}>{node.name}</Text>
),
},
...nodesB.map((nodeB) => ({
title: (
<Tooltip title={nodeB.name}>
<Text ellipsis style={{ maxWidth: 80 }}>{nodeB.name}</Text>
</Tooltip>
),
dataIndex: nodeB.id,
key: nodeB.id,
width: 60,
align: 'center' as const,
render: (cell: MatrixCell) => {
if (!cell || !cell.pair) {
return <div style={{ color: isDark ? '#555' : '#ddd' }}>-</div>;
}
return (
<Checkbox
checked={cell.pair.selected}
onChange={() => onTogglePair(cell.pair!.id)}
/>
);
},
})),
];
return (
<div key={`${categoryA}-${categoryB}`} style={{ marginBottom: 16 }}>
<div style={{
display: 'flex',
alignItems: 'center',
gap: 8,
marginBottom: 8,
padding: '4px 8px',
background: isDark ? '#1f1f1f' : '#fafafa',
borderRadius: 4,
}}>
<Tag color="blue">A: {categoryA}</Tag>
<Text type="secondary">×</Text>
<Tag color="green">B: {categoryB}</Tag>
</div>
<Table
dataSource={matrixData}
columns={columns}
pagination={false}
size="small"
scroll={{ x: 'max-content' }}
rowKey={(row) => (row.nodeA as DAGNode).id}
bordered
/>
</div>
);
};
// Render matrices for matching categories (same category in both paths)
const matchingCategories = categoriesA.filter(cat => categoriesB.includes(cat));
return (
<div style={{ padding: 16 }}>
{matchingCategories.length > 0 ? (
<>
<Text type="secondary" style={{ display: 'block', marginBottom: 16 }}>
Select attribute pairs to cross between Path A and Path B
</Text>
{matchingCategories.map(category =>
renderCategoryMatrix(category, category)
)}
</>
) : (
<Text type="secondary">
No matching categories found between the two paths
</Text>
)}
</div>
);
}

View File

@@ -0,0 +1,189 @@
import { useMemo } from 'react';
import { Card, Tag, Typography, Space, Empty, Divider } from 'antd';
import { SwapOutlined, ArrowRightOutlined } from '@ant-design/icons';
import type { CrossoverPair, AttributeDAG } from '../../types';

const { Text } = Typography;

// Props for the read-only summary card of the user's crossover selection.
interface CrossoverPreviewProps {
  selectedPairs: CrossoverPair[];  // only the pairs with `selected === true`
  dagA: AttributeDAG | null;       // Path A result, used for its query label
  dagB: AttributeDAG | null;       // Path B result, used for its query label
  isDark: boolean;                 // theme flag for inline colors
}

// Attributes of one category, collected for the per-path summary column.
interface GroupedAttribute {
  category: string;
  attributes: string[];
}

/**
 * Shows a preview of the currently selected crossover pairs: a side-by-side
 * per-category attribute summary for each path, plus a sample list of up to
 * five source→target crossings. Renders an Empty placeholder when nothing
 * is selected.
 */
export function CrossoverPreview({
  selectedPairs,
  dagA,
  dagB,
  isDark,
}: CrossoverPreviewProps) {
  // Group selected attributes by path and category. Sets deduplicate
  // attribute names that appear in several pairs; `crosses` keeps every
  // pair for the sample-crossover list below.
  const { pathAGroups, pathBGroups, crossPairs } = useMemo(() => {
    const pathAMap = new Map<string, Set<string>>();
    const pathBMap = new Map<string, Set<string>>();
    const crosses: Array<{
      sourceAttr: string;
      sourceCategory: string;
      sourcePath: string;
      targetAttr: string;
      targetCategory: string;
      targetPath: string;
    }> = [];
    for (const pair of selectedPairs) {
      // Track source attributes
      const sourceKey = pair.sourceNode.category;
      if (!pathAMap.has(sourceKey)) pathAMap.set(sourceKey, new Set());
      pathAMap.get(sourceKey)!.add(pair.sourceNode.name);
      // Track target attributes
      const targetKey = pair.targetNode.category;
      if (!pathBMap.has(targetKey)) pathBMap.set(targetKey, new Set());
      pathBMap.get(targetKey)!.add(pair.targetNode.name);
      // Track cross pairs for visualization
      crosses.push({
        sourceAttr: pair.sourceNode.name,
        sourceCategory: pair.sourceNode.category,
        sourcePath: pair.sourcePathId,
        targetAttr: pair.targetNode.name,
        targetCategory: pair.targetNode.category,
        targetPath: pair.targetPathId,
      });
    }
    const pathAGroups: GroupedAttribute[] = Array.from(pathAMap.entries()).map(
      ([category, attrs]) => ({ category, attributes: Array.from(attrs) })
    );
    const pathBGroups: GroupedAttribute[] = Array.from(pathBMap.entries()).map(
      ([category, attrs]) => ({ category, attributes: Array.from(attrs) })
    );
    return { pathAGroups, pathBGroups, crossPairs: crosses };
  }, [selectedPairs]);

  // Nothing selected yet → placeholder card.
  if (selectedPairs.length === 0) {
    return (
      <Card size="small" style={{ marginBottom: 16 }}>
        <Empty
          description="No pairs selected"
          image={Empty.PRESENTED_IMAGE_SIMPLE}
        />
      </Card>
    );
  }

  const cardStyle = {
    background: isDark ? '#1f1f1f' : '#fafafa',
    borderRadius: 8,
  };

  // One summary column: path label + query, then its attributes by category.
  const renderPathSummary = (
    label: string,
    color: 'blue' | 'green',
    groups: GroupedAttribute[],
    query: string | undefined
  ) => (
    <div style={{ flex: 1, minWidth: 150 }}>
      <div style={{ marginBottom: 8 }}>
        <Tag color={color}>{label}</Tag>
        {query && (
          <Text strong style={{ marginLeft: 4 }}>
            {query}
          </Text>
        )}
      </div>
      <Space direction="vertical" size={4} style={{ width: '100%' }}>
        {groups.map(({ category, attributes }) => (
          <div key={category}>
            <Text type="secondary" style={{ fontSize: 11 }}>
              {category}:
            </Text>
            <div style={{ marginLeft: 8 }}>
              {attributes.map((attr, i) => (
                <Tag
                  key={i}
                  style={{
                    margin: '2px 4px 2px 0',
                    fontSize: 11,
                    padding: '0 6px',
                  }}
                >
                  {attr}
                </Tag>
              ))}
            </div>
          </div>
        ))}
      </Space>
    </div>
  );

  // Get unique cross pairs for display (limit to first 5)
  const uniqueCrosses = crossPairs.slice(0, 5);

  return (
    <Card
      size="small"
      title={
        <Space>
          <SwapOutlined />
          <Text strong>Selection Preview</Text>
          <Tag>{selectedPairs.length} pairs</Tag>
        </Space>
      }
      style={{ marginBottom: 16 }}
    >
      {/* Side by side path summary */}
      <div style={{ display: 'flex', gap: 16, marginBottom: 16 }}>
        {renderPathSummary('Path A', 'blue', pathAGroups, dagA?.query)}
        <Divider type="vertical" style={{ height: 'auto' }} />
        {renderPathSummary('Path B', 'green', pathBGroups, dagB?.query)}
      </div>
      {/* Cross visualization: first five source→target crossings */}
      <div style={cardStyle}>
        <div style={{ padding: 12 }}>
          <Text type="secondary" style={{ fontSize: 12, display: 'block', marginBottom: 8 }}>
            Sample Crossovers:
          </Text>
          <Space direction="vertical" size={4} style={{ width: '100%' }}>
            {uniqueCrosses.map((cross, i) => (
              <div
                key={i}
                style={{
                  display: 'flex',
                  alignItems: 'center',
                  gap: 8,
                  padding: '4px 8px',
                  background: isDark ? '#141414' : '#fff',
                  borderRadius: 4,
                  fontSize: 12,
                }}
              >
                <Tag color="blue" style={{ margin: 0 }}>A</Tag>
                <Text ellipsis style={{ maxWidth: 100 }}>{cross.sourceAttr}</Text>
                <Text type="secondary" style={{ fontSize: 10 }}>({cross.sourceCategory})</Text>
                <ArrowRightOutlined style={{ color: isDark ? '#666' : '#999' }} />
                <Tag color="green" style={{ margin: 0 }}>B</Tag>
                <Text ellipsis style={{ maxWidth: 100 }}>{cross.targetAttr}</Text>
                <Text type="secondary" style={{ fontSize: 10 }}>({cross.targetCategory})</Text>
              </div>
            ))}
            {crossPairs.length > 5 && (
              <Text type="secondary" style={{ fontSize: 11 }}>
                ... and {crossPairs.length - 5} more pairs
              </Text>
            )}
          </Space>
        </div>
      </div>
    </Card>
  );
}

View File

@@ -1,36 +1,73 @@
import { memo } from 'react';
import { SwapOutlined } from '@ant-design/icons';
interface OriginalAttributeNodeProps {
data: {
label: string;
color: string;
isDark: boolean;
crossoverSource?: string; // The query from which this attribute was crossed over
};
}
export const OriginalAttributeNode = memo(({ data }: OriginalAttributeNodeProps) => {
const { label, color, isDark } = data;
const { label, color, isDark, crossoverSource } = data;
const isCrossover = !!crossoverSource;
return (
<div
style={{
padding: '8px 16px',
position: 'relative',
padding: isCrossover ? '8px 16px 8px 12px' : '8px 16px',
borderRadius: 8,
background: isDark
? `linear-gradient(135deg, ${color}22 0%, ${color}0a 100%)`
: `linear-gradient(135deg, ${color}1a 0%, ${color}05 100%)`,
border: `1px solid ${color}33`,
border: isCrossover
? `2px dashed ${isDark ? '#faad14' : '#d48806'}`
: `1px solid ${color}33`,
color: isDark ? 'rgba(255,255,255,0.9)' : 'rgba(0,0,0,0.85)',
fontSize: '13px',
fontWeight: 500,
textAlign: 'center',
whiteSpace: 'nowrap',
userSelect: 'none',
boxShadow: '0 2px 6px rgba(0,0,0,0.05)',
boxShadow: isCrossover
? '0 2px 8px rgba(250, 173, 20, 0.3)'
: '0 2px 6px rgba(0,0,0,0.05)',
}}
>
{isCrossover && (
<SwapOutlined
style={{
position: 'absolute',
top: -8,
left: -8,
fontSize: 14,
color: isDark ? '#faad14' : '#d48806',
background: isDark ? '#1f1f1f' : '#fff',
borderRadius: '50%',
padding: 2,
border: `1px solid ${isDark ? '#faad14' : '#d48806'}`,
}}
/>
)}
<div style={{ display: 'flex', alignItems: 'center', gap: 6 }}>
{label}
</div>
{isCrossover && (
<div
style={{
fontSize: 9,
color: isDark ? '#faad14' : '#d48806',
marginTop: 2,
fontWeight: 400,
}}
>
from {crossoverSource}
</div>
)}
</div>
);
});

View File

@@ -5,6 +5,7 @@ import type { ExpertTransformationDAGResult, CategoryDefinition } from '../../ty
interface LayoutConfig {
isDark: boolean;
fontSize?: number;
crossoverSource?: string; // If set, marks all attributes as crossed over from this source
}
const COLOR_PALETTE = [
@@ -39,7 +40,7 @@ export function useExpertTransformationLayout(
return { nodes: [], edges: [] };
}
const { isDark, fontSize = 13 } = config;
const { isDark, fontSize = 13, crossoverSource } = config;
const nodes: Node[] = [];
const edges: Edge[] = [];
@@ -233,6 +234,7 @@ export function useExpertTransformationLayout(
label: group.attribute,
color,
isDark,
crossoverSource, // Mark as crossover if source is provided
},
draggable: false,
selectable: false,

View File

@@ -1,7 +1,8 @@
import { useState, useCallback } from 'react';
import type {
AttributeDAG,
DAGStreamAnalyzeResponse
DAGStreamAnalyzeResponse,
PromptLanguage
} from '../types';
import { CategoryMode } from '../types';
import { analyzeAttributesStream } from '../services/api';
@@ -34,12 +35,13 @@ export function useAttribute() {
chainCount: number = 5,
categoryMode: CategoryMode = CategoryMode.DYNAMIC_AUTO,
customCategories?: string[],
suggestedCategoryCount: number = 3
suggestedCategoryCount: number = 3,
lang: PromptLanguage = 'zh'
) => {
// 重置狀態
// Reset state
setProgress({
step: 'idle',
message: '準備開始分析...',
message: lang === 'zh' ? '準備開始分析...' : 'Preparing analysis...',
});
setError(null);
setCurrentResult(null);
@@ -53,62 +55,63 @@ export function useAttribute() {
temperature,
category_mode: categoryMode,
custom_categories: customCategories,
suggested_category_count: suggestedCategoryCount
suggested_category_count: suggestedCategoryCount,
lang
},
{
onStep0Start: () => {
setProgress({
step: 'step0',
message: '正在分析類別...',
message: lang === 'zh' ? '正在分析類別...' : 'Analyzing categories...',
});
},
onStep0Complete: () => {
setProgress(prev => ({
...prev,
message: '類別分析完成',
message: lang === 'zh' ? '類別分析完成' : 'Category analysis complete',
}));
},
onCategoriesResolved: (categories) => {
setProgress(prev => ({
...prev,
message: `使用 ${categories.length} 個類別`,
message: lang === 'zh' ? `使用 ${categories.length} 個類別` : `Using ${categories.length} categories`,
}));
},
onStep1Start: () => {
setProgress({
step: 'step1',
message: '正在分析物件屬性列表...',
message: lang === 'zh' ? '正在分析物件屬性列表...' : 'Analyzing object attributes...',
});
},
onStep1Complete: () => {
setProgress(prev => ({
...prev,
message: '屬性列表分析完成',
message: lang === 'zh' ? '屬性列表分析完成' : 'Attribute analysis complete',
}));
},
onRelationshipsStart: () => {
setProgress({
step: 'relationships',
message: '正在生成關係...',
message: lang === 'zh' ? '正在生成關係...' : 'Generating relationships...',
});
},
onRelationshipsComplete: (count) => {
setProgress(prev => ({
...prev,
message: `生成 ${count} 個關係`,
message: lang === 'zh' ? `生成 ${count} 個關係` : `Generated ${count} relationships`,
}));
},
onDone: (response: DAGStreamAnalyzeResponse) => {
setProgress({
step: 'done',
message: '分析完成!',
message: lang === 'zh' ? '分析完成!' : 'Analysis complete!',
});
setCurrentResult(response.dag);
@@ -126,7 +129,7 @@ export function useAttribute() {
setProgress({
step: 'error',
error: errorMsg,
message: `錯誤: ${errorMsg}`,
message: lang === 'zh' ? `錯誤: ${errorMsg}` : `Error: ${errorMsg}`,
});
setError(errorMsg);
},
@@ -137,19 +140,19 @@ export function useAttribute() {
setProgress({
step: 'error',
error: errorMessage,
message: `錯誤: ${errorMessage}`,
message: lang === 'zh' ? `錯誤: ${errorMessage}` : `Error: ${errorMessage}`,
});
setError(errorMessage);
throw err;
}
}, []);
const loadFromHistory = useCallback((item: DAGHistoryItem) => {
const loadFromHistory = useCallback((item: DAGHistoryItem, lang: PromptLanguage = 'zh') => {
setCurrentResult(item.result);
setError(null);
setProgress({
step: 'done',
message: '從歷史記錄載入',
message: lang === 'zh' ? '從歷史記錄載入' : 'Loaded from history',
});
}, []);

View File

@@ -0,0 +1,182 @@
import { useState, useCallback, useMemo } from 'react';
import type {
  AttributeDAG,
  DAGNode,
  CrossoverPair,
  CrossoverConfig,
  CrossoverResult,
} from '../types';

// Default configuration: both pairing strategies enabled.
const DEFAULT_CONFIG: CrossoverConfig = {
  enabledCrossTypes: ['same-category', 'cross-category'],
  maxPairsPerType: 10,
  autoGenerate: true,
};

// Cap on nodes taken per category for the cross-category strategy, to keep
// the combinatorial pair count manageable.
const CROSS_CATEGORY_NODE_LIMIT = 3;

/**
 * Hook managing attribute crossover pairs between two analyzed DAGs
 * (Path A and Path B): generates candidate pairs, tracks per-pair
 * selection, and exposes grouped views and statistics for the UI.
 */
export function useAttributeCrossover() {
  const [pairs, setPairs] = useState<CrossoverPair[]>([]);
  const [config, setConfig] = useState<CrossoverConfig>(DEFAULT_CONFIG);

  // Bucket a DAG's nodes by category name.
  const groupNodesByCategory = useCallback((nodes: DAGNode[]): Record<string, DAGNode[]> => {
    return nodes.reduce((acc, node) => {
      if (!acc[node.category]) acc[node.category] = [];
      acc[node.category].push(node);
      return acc;
    }, {} as Record<string, DAGNode[]>);
  }, []);

  /**
   * Generate crossover pairs from two DAGs.
   *
   * Strategy 1 ("same-category"): pair A's and B's nodes that share a
   * category — default-selected, as they are directly comparable.
   * Strategy 2 ("cross-category"): pair nodes from *different* categories
   * (e.g. A's materials with B's functions) — default-unselected.
   *
   * Fix: each strategy now runs only when listed in
   * config.enabledCrossTypes; previously that setting was declared in
   * DEFAULT_CONFIG but never consulted, so disabling a strategy via
   * setConfig had no effect. The default enables both strategies, so
   * default behavior is unchanged.
   */
  const generatePairs = useCallback((
    dagA: AttributeDAG,
    dagB: AttributeDAG
  ): CrossoverPair[] => {
    const newPairs: CrossoverPair[] = [];
    const nodesByCategoryA = groupNodesByCategory(dagA.nodes);
    const nodesByCategoryB = groupNodesByCategory(dagB.nodes);

    // Get all unique categories from both DAGs
    const categoriesA = Object.keys(nodesByCategoryA);
    const categoriesB = Object.keys(nodesByCategoryB);

    // Strategy 1: Cross matching categories (A's category X with B's category X)
    // This helps find how different objects approach the same category differently
    if (config.enabledCrossTypes.includes('same-category')) {
      for (const category of categoriesA) {
        if (!nodesByCategoryB[category]) continue;
        const nodesA = nodesByCategoryA[category].slice(0, config.maxPairsPerType);
        const nodesB = nodesByCategoryB[category].slice(0, config.maxPairsPerType);
        for (const nodeA of nodesA) {
          for (const nodeB of nodesB) {
            newPairs.push({
              id: `same-${nodeA.id}-${nodeB.id}`,
              sourcePathId: 'A',
              sourceNode: nodeA,
              targetPathId: 'B',
              targetNode: nodeB,
              crossType: `same-${category}`,
              selected: true, // Default selected for same-category pairs
            });
          }
        }
      }
    }

    // Strategy 2: Cross different categories between A and B
    // This creates innovative combinations (e.g., A's materials with B's functions)
    if (config.enabledCrossTypes.includes('cross-category')) {
      for (const categoryA of categoriesA) {
        for (const categoryB of categoriesB) {
          // Skip if same category (already handled above)
          if (categoryA === categoryB) continue;
          const nodesA = nodesByCategoryA[categoryA].slice(0, CROSS_CATEGORY_NODE_LIMIT);
          const nodesB = nodesByCategoryB[categoryB].slice(0, CROSS_CATEGORY_NODE_LIMIT);
          if (nodesA.length === 0 || nodesB.length === 0) continue;
          for (const nodeA of nodesA) {
            for (const nodeB of nodesB) {
              newPairs.push({
                id: `cross-${nodeA.id}-${nodeB.id}`,
                sourcePathId: 'A',
                sourceNode: nodeA,
                targetPathId: 'B',
                targetNode: nodeB,
                crossType: `cross-${categoryA}-${categoryB}`,
                selected: false, // Cross-category pairs default to not selected
              });
            }
          }
        }
      }
    }

    return newPairs;
  }, [groupNodesByCategory, config.maxPairsPerType, config.enabledCrossTypes]);

  // Generate pairs and commit them to state; returns the new pairs.
  const applyPairs = useCallback((dagA: AttributeDAG, dagB: AttributeDAG) => {
    const newPairs = generatePairs(dagA, dagB);
    setPairs(newPairs);
    return newPairs;
  }, [generatePairs]);

  // Toggle one pair's selection flag.
  const togglePairSelection = useCallback((pairId: string) => {
    setPairs(prev => prev.map(pair =>
      pair.id === pairId
        ? { ...pair, selected: !pair.selected }
        : pair
    ));
  }, []);

  // Set the selection flag for every pair of a given cross type.
  const selectPairsByType = useCallback((crossType: string, selected: boolean) => {
    setPairs(prev => prev.map(pair =>
      pair.crossType === crossType
        ? { ...pair, selected }
        : pair
    ));
  }, []);

  // Set the selection flag for all pairs at once.
  const selectAll = useCallback((selected: boolean) => {
    setPairs(prev => prev.map(pair => ({ ...pair, selected })));
  }, []);

  // Drop every generated pair.
  const clearPairs = useCallback(() => {
    setPairs([]);
  }, []);

  // Pairs grouped by their cross type, for per-type UI sections.
  const pairsByType = useMemo(() => {
    return pairs.reduce((acc, pair) => {
      if (!acc[pair.crossType]) acc[pair.crossType] = [];
      acc[pair.crossType].push(pair);
      return acc;
    }, {} as Record<string, CrossoverPair[]>);
  }, [pairs]);

  // Only the pairs the user has selected.
  const selectedPairs = useMemo(() => {
    return pairs.filter(p => p.selected);
  }, [pairs]);

  // total/selected counts per cross type.
  const crossTypeStats = useMemo(() => {
    const stats: Record<string, { total: number; selected: number }> = {};
    for (const pair of pairs) {
      if (!stats[pair.crossType]) {
        stats[pair.crossType] = { total: 0, selected: 0 };
      }
      stats[pair.crossType].total++;
      if (pair.selected) {
        stats[pair.crossType].selected++;
      }
    }
    return stats;
  }, [pairs]);

  // Aggregate result summary passed downstream.
  const result: CrossoverResult = useMemo(() => ({
    pairs,
    totalPairs: pairs.length,
  }), [pairs]);

  return {
    pairs,
    selectedPairs,
    pairsByType,
    crossTypeStats,
    config,
    result,
    setConfig,
    generatePairs,
    applyPairs,
    togglePairSelection,
    selectPairsByType,
    selectAll,
    clearPairs,
  };
}

View File

@@ -5,6 +5,7 @@ import type {
DeduplicationResult,
DeduplicationProgress,
DeduplicationMethod,
PromptLanguage,
} from '../types';
/**
@@ -25,14 +26,16 @@ export function useDeduplication() {
* @param descriptions - List of descriptions to deduplicate
* @param threshold - Similarity threshold (only used for embedding method)
* @param method - Deduplication method: 'embedding' (fast) or 'llm' (accurate but slow)
* @param lang - Prompt language for LLM method
*/
const deduplicate = useCallback(async (
descriptions: ExpertTransformationDescription[],
threshold: number = 0.85,
method: DeduplicationMethod = 'embedding'
method: DeduplicationMethod = 'embedding',
lang: PromptLanguage = 'zh'
) => {
if (!descriptions || descriptions.length === 0) {
setError('No descriptions to deduplicate');
setError(lang === 'zh' ? '沒有可去重的描述' : 'No descriptions to deduplicate');
return;
}
@@ -40,12 +43,15 @@ export function useDeduplication() {
setError(null);
setResult(null);
// 根據方法顯示不同的進度訊息
const methodLabel = method === 'embedding' ? 'Embedding' : 'LLM';
const pairCount = (descriptions.length * (descriptions.length - 1)) / 2;
const progressMessage = method === 'llm'
? `Processing ${descriptions.length} descriptions with LLM (${pairCount} comparisons)...`
: `Processing ${descriptions.length} descriptions with ${methodLabel}...`;
? (lang === 'zh'
? `正在使用 LLM 處理 ${descriptions.length} 個描述(${pairCount} 次比較)...`
: `Processing ${descriptions.length} descriptions with LLM (${pairCount} comparisons)...`)
: (lang === 'zh'
? `正在使用 ${methodLabel} 處理 ${descriptions.length} 個描述...`
: `Processing ${descriptions.length} descriptions with ${methodLabel}...`);
setProgress({
step: 'processing',
@@ -57,19 +63,22 @@ export function useDeduplication() {
descriptions,
similarity_threshold: threshold,
method,
lang,
});
setResult(deduplicationResult);
setProgress({
step: 'done',
message: `Found ${deduplicationResult.total_groups} unique groups, ${deduplicationResult.total_duplicates} duplicates (${methodLabel})`,
message: lang === 'zh'
? `發現 ${deduplicationResult.total_groups} 個獨特群組,${deduplicationResult.total_duplicates} 個重複(${methodLabel}`
: `Found ${deduplicationResult.total_groups} unique groups, ${deduplicationResult.total_duplicates} duplicates (${methodLabel})`,
});
} catch (err) {
const errorMessage = err instanceof Error ? err.message : 'Unknown error';
setError(errorMessage);
setProgress({
step: 'error',
message: 'Deduplication failed',
message: lang === 'zh' ? '去重失敗' : 'Deduplication failed',
error: errorMessage,
});
} finally {

View File

@@ -0,0 +1,218 @@
import { useState, useCallback } from 'react';
import type {
  AttributeDAG,
  DAGStreamAnalyzeResponse,
  PathId,
  PathState,
  DualPathState,
  DAGProgress,
  CategoryMode,
  PromptLanguage,
} from '../types';
import { CategoryMode as CategoryModeValues } from '../types';
import { analyzeAttributesStream } from '../services/api';

const initialProgress: DAGProgress = {
  step: 'idle',
  message: '',
};

const initialPathState: PathState = {
  query: '',
  result: null,
  loading: false,
  progress: initialProgress,
  error: null,
};

// Options accepted by analyzePath / analyzeParallel.
export interface AnalyzeOptions {
  model?: string;
  temperature?: number;
  chainCount?: number;
  categoryMode?: CategoryMode;
  customCategories?: string[];
  suggestedCategoryCount?: number;
  lang?: PromptLanguage; // UI/prompt language for progress messages
}

/**
 * Hook holding the state of two parallel attribute analyses
 * ("Path A" and "Path B"), each with its own query, result,
 * streaming progress, and error.
 */
export function useDualPathAttribute() {
  const [state, setState] = useState<DualPathState>({
    pathA: { ...initialPathState },
    pathB: { ...initialPathState },
  });

  // Map a PathId ('A' | 'B') to its key in DualPathState.
  const getPathKey = (pathId: PathId): 'pathA' | 'pathB' =>
    pathId === 'A' ? 'pathA' : 'pathB';

  // Shallow-merge updates into one path's state.
  const updatePathState = useCallback((
    pathId: PathId,
    updates: Partial<PathState>
  ) => {
    const pathKey = getPathKey(pathId);
    setState(prev => ({
      ...prev,
      [pathKey]: {
        ...prev[pathKey],
        ...updates,
      },
    }));
  }, []);

  // Convenience wrapper to update only a path's progress.
  const updatePathProgress = useCallback((
    pathId: PathId,
    step: DAGProgress['step'],
    message: string,
    error?: string
  ) => {
    updatePathState(pathId, {
      progress: { step, message, error },
    });
  }, [updatePathState]);

  /**
   * Run the streaming attribute analysis for one path, mirroring each
   * SSE stage into that path's progress. Errors are captured into the
   * path's state; this function never throws.
   */
  const analyzePath = useCallback(async (
    pathId: PathId,
    query: string,
    options: AnalyzeOptions = {}
  ) => {
    const {
      model,
      temperature,
      chainCount = 5,
      categoryMode = CategoryModeValues.DYNAMIC_AUTO,
      customCategories,
      suggestedCategoryCount = 3,
      lang = 'zh',
    } = options;

    // Reset state for this path
    updatePathState(pathId, {
      query,
      result: null,
      loading: true,
      error: null,
      progress: { step: 'idle', message: lang === 'zh' ? '準備分析中...' : 'Preparing analysis...' },
    });

    try {
      await analyzeAttributesStream(
        {
          query,
          chain_count: chainCount,
          model,
          temperature,
          category_mode: categoryMode,
          custom_categories: customCategories,
          suggested_category_count: suggestedCategoryCount,
          lang,
        },
        {
          onStep0Start: () => {
            updatePathProgress(pathId, 'step0', lang === 'zh' ? '正在分析類別...' : 'Analyzing categories...');
          },
          onStep0Complete: () => {
            updatePathProgress(pathId, 'step0', lang === 'zh' ? '類別分析完成' : 'Categories analyzed');
          },
          onCategoriesResolved: (categories) => {
            updatePathProgress(pathId, 'step0', lang === 'zh' ? `使用 ${categories.length} 個類別` : `Using ${categories.length} categories`);
          },
          onStep1Start: () => {
            updatePathProgress(pathId, 'step1', lang === 'zh' ? '正在分析屬性...' : 'Analyzing attributes...');
          },
          onStep1Complete: () => {
            updatePathProgress(pathId, 'step1', lang === 'zh' ? '屬性分析完成' : 'Attributes analyzed');
          },
          onRelationshipsStart: () => {
            updatePathProgress(pathId, 'relationships', lang === 'zh' ? '正在生成關係...' : 'Generating relationships...');
          },
          onRelationshipsComplete: (count) => {
            updatePathProgress(pathId, 'relationships', lang === 'zh' ? `生成 ${count} 個關係` : `Generated ${count} relationships`);
          },
          onDone: (response: DAGStreamAnalyzeResponse) => {
            updatePathState(pathId, {
              result: response.dag,
              loading: false,
              progress: { step: 'done', message: lang === 'zh' ? '分析完成!' : 'Analysis complete!' },
            });
          },
          onError: (errorMsg) => {
            updatePathState(pathId, {
              loading: false,
              error: errorMsg,
              progress: { step: 'error', message: lang === 'zh' ? `錯誤: ${errorMsg}` : `Error: ${errorMsg}`, error: errorMsg },
            });
          },
        }
      );
    } catch (err) {
      const errorMessage = err instanceof Error ? err.message : 'Unknown error';
      updatePathState(pathId, {
        loading: false,
        error: errorMessage,
        progress: { step: 'error', message: lang === 'zh' ? `錯誤: ${errorMessage}` : `Error: ${errorMessage}`, error: errorMessage },
      });
    }
  }, [updatePathState, updatePathProgress]);

  // Analyze both paths concurrently; each path reports its own progress.
  const analyzeParallel = useCallback(async (
    queryA: string,
    queryB: string,
    options: AnalyzeOptions = {}
  ) => {
    // Run both analyses in parallel
    await Promise.all([
      analyzePath('A', queryA, options),
      analyzePath('B', queryB, options),
    ]);
  }, [analyzePath]);

  // Reset one path back to its initial state.
  const clearPath = useCallback((pathId: PathId) => {
    updatePathState(pathId, { ...initialPathState });
  }, [updatePathState]);

  // Reset both paths.
  const clearAll = useCallback(() => {
    setState({
      pathA: { ...initialPathState },
      pathB: { ...initialPathState },
    });
  }, []);

  /**
   * Inject a previously computed result (e.g. from history) into a path.
   *
   * Fix: the "loaded" message is now localized via the optional `lang`
   * parameter (default 'zh'), consistent with useAttribute's
   * loadFromHistory and every other progress message in this hook;
   * previously it was hard-coded English.
   */
  const setPathResult = useCallback((pathId: PathId, result: AttributeDAG, lang: PromptLanguage = 'zh') => {
    updatePathState(pathId, {
      result,
      loading: false,
      progress: { step: 'done', message: lang === 'zh' ? '從歷史記錄載入' : 'Loaded from history' },
    });
  }, [updatePathState]);

  // Computed properties
  const pathA = state.pathA;
  const pathB = state.pathB;
  const bothLoading = pathA.loading && pathB.loading;
  const anyLoading = pathA.loading || pathB.loading;
  const bothComplete = pathA.result !== null && pathB.result !== null;
  const bothIdle = pathA.progress.step === 'idle' && pathB.progress.step === 'idle';

  return {
    state,
    pathA,
    pathB,
    analyzePath,
    analyzeParallel,
    clearPath,
    clearAll,
    setPathResult,
    // Status flags
    bothLoading,
    anyLoading,
    bothComplete,
    bothIdle,
  };
}

View File

@@ -8,6 +8,7 @@ import type {
ExpertProfile,
CategoryDefinition,
ExpertSource,
PromptLanguage,
} from '../types';
interface UseExpertTransformationOptions {
@@ -15,6 +16,7 @@ interface UseExpertTransformationOptions {
temperature?: number;
expertSource?: ExpertSource;
expertLanguage?: 'en' | 'zh';
lang?: PromptLanguage;
}
export function useExpertTransformation(options: UseExpertTransformationOptions = {}) {
@@ -48,11 +50,12 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
return new Promise((resolve) => {
let categoryExperts: ExpertProfile[] = [];
const lang = options.lang || 'zh';
setProgress((prev) => ({
...prev,
step: 'expert',
currentCategory: category.name,
message: `組建專家團隊...`,
message: lang === 'zh' ? '組建專家團隊...' : 'Building expert team...',
}));
expertTransformCategoryStream(
@@ -67,13 +70,14 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
expert_language: options.expertLanguage,
model: options.model,
temperature: options.temperature,
lang,
},
{
onExpertStart: () => {
setProgress((prev) => ({
...prev,
step: 'expert',
message: `正在組建專家團隊...`,
message: lang === 'zh' ? '正在組建專家團隊...' : 'Building expert team...',
}));
},
onExpertComplete: (expertsData) => {
@@ -82,40 +86,40 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
setProgress((prev) => ({
...prev,
experts: expertsData,
message: `專家團隊組建完成(${expertsData.length}位專家)`,
message: lang === 'zh' ? `專家團隊組建完成(${expertsData.length}位專家)` : `Expert team ready (${expertsData.length} experts)`,
}));
},
onKeywordStart: () => {
setProgress((prev) => ({
...prev,
step: 'keyword',
message: `專家團隊為「${category.name}」的屬性生成關鍵字...`,
message: lang === 'zh' ? `專家團隊為「${category.name}」的屬性生成關鍵字...` : `Experts generating keywords for "${category.name}"...`,
}));
},
onKeywordProgress: (data) => {
setProgress((prev) => ({
...prev,
currentAttribute: data.attribute,
message: `為「${data.attribute}」生成了 ${data.count} 個關鍵字`,
message: lang === 'zh' ? `為「${data.attribute}」生成了 ${data.count} 個關鍵字` : `Generated ${data.count} keywords for "${data.attribute}"`,
}));
},
onKeywordComplete: (totalKeywords) => {
setProgress((prev) => ({
...prev,
message: `共生成了 ${totalKeywords} 個專家關鍵字`,
message: lang === 'zh' ? `共生成了 ${totalKeywords} 個專家關鍵字` : `Generated ${totalKeywords} expert keywords`,
}));
},
onDescriptionStart: () => {
setProgress((prev) => ({
...prev,
step: 'description',
message: `為「${category.name}」的專家關鍵字生成創新描述...`,
message: lang === 'zh' ? `為「${category.name}」的專家關鍵字生成創新描述...` : `Generating descriptions for "${category.name}" keywords...`,
}));
},
onDescriptionComplete: (count) => {
setProgress((prev) => ({
...prev,
message: `生成了 ${count} 個創新描述`,
message: lang === 'zh' ? `生成了 ${count} 個創新描述` : `Generated ${count} descriptions`,
}));
},
onDone: (data) => {
@@ -123,7 +127,7 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
...prev,
step: 'done',
processedCategories: [...prev.processedCategories, category.name],
message: `${category.name}」處理完成`,
message: lang === 'zh' ? `${category.name}」處理完成` : `"${category.name}" complete`,
}));
resolve({
result: data.result,
@@ -135,7 +139,7 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
...prev,
step: 'error',
error: err,
message: `處理「${category.name}」時發生錯誤`,
message: lang === 'zh' ? `處理「${category.name}」時發生錯誤` : `Error processing "${category.name}"`,
}));
resolve({
result: null,
@@ -148,7 +152,7 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
...prev,
step: 'error',
error: err.message,
message: `處理「${category.name}」時發生錯誤`,
message: lang === 'zh' ? `處理「${category.name}」時發生錯誤` : `Error processing "${category.name}"`,
}));
resolve({
result: null,
@@ -157,11 +161,12 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
});
});
},
[options.model, options.temperature, options.expertSource, options.expertLanguage]
[options.model, options.temperature, options.expertSource, options.expertLanguage, options.lang]
);
const transformAll = useCallback(
async (input: ExpertTransformationInput) => {
const lang = options.lang || 'zh';
setLoading(true);
setError(null);
setResults(null);
@@ -170,7 +175,7 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
step: 'idle',
currentCategory: '',
processedCategories: [],
message: '開始處理...',
message: lang === 'zh' ? '開始處理...' : 'Starting...',
});
const categoryResults: ExpertTransformationCategoryResult[] = [];
@@ -210,12 +215,12 @@ export function useExpertTransformation(options: UseExpertTransformationOptions
setProgress((prev) => ({
...prev,
step: 'done',
message: '所有類別處理完成',
message: lang === 'zh' ? '所有類別處理完成' : 'All categories complete',
}));
return finalResult;
},
[transformCategory]
[transformCategory, options.lang]
);
const clearResults = useCallback(() => {

View File

@@ -12,11 +12,15 @@ import type {
ExpertTransformationCategoryResult,
ExpertProfile,
DeduplicationRequest,
DeduplicationResult
DeduplicationResult,
PatentSearchRequest,
PatentSearchResponse,
BatchPatentSearchRequest,
BatchPatentSearchResponse,
} from '../types';
// 自動使用當前瀏覽器的 hostname支援遠端存取
const API_BASE_URL = `http://${window.location.hostname}:8000/api`;
const API_BASE_URL = `http://${window.location.hostname}:8001/api`;
export interface SSECallbacks {
onStep0Start?: () => void;
@@ -322,3 +326,43 @@ export async function deduplicateDescriptions(
return response.json();
}
// ===== Patent Search API =====
export async function searchPatents(
request: PatentSearchRequest
): Promise<PatentSearchResponse> {
const response = await fetch(`${API_BASE_URL}/patent/search`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(request),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`API error: ${response.status} - ${errorText}`);
}
return response.json();
}
export async function batchSearchPatents(
request: BatchPatentSearchRequest
): Promise<BatchPatentSearchResponse> {
const response = await fetch(`${API_BASE_URL}/patent/search/batch`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(request),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`API error: ${response.status} - ${errorText}`);
}
return response.json();
}

View File

@@ -1,6 +1,9 @@
// ===== Language type =====
export type PromptLanguage = 'zh' | 'en';
export interface AttributeNode {
name: string;
category?: string; // 材料, 功能, 用途, 使用族群
category?: string; // Materials, Functions, Usages, User Groups
children?: AttributeNode[];
}
@@ -9,16 +12,29 @@ export interface AnalyzeRequest {
model?: string;
temperature?: number;
categories?: string[];
lang?: PromptLanguage;
}
export const DEFAULT_CATEGORIES = ['材料', '功能', '用途', '使用族群', '特性'];
export const DEFAULT_CATEGORIES = {
zh: ['材料', '功能', '用途', '使用族群', '特性'],
en: ['Materials', 'Functions', 'Usages', 'User Groups', 'Characteristics'],
};
export const CATEGORY_DESCRIPTIONS: Record<string, string> = {
export const CATEGORY_DESCRIPTIONS: Record<PromptLanguage, Record<string, string>> = {
zh: {
'材料': '物件由什麼材料組成',
'功能': '物件能做什麼',
'用途': '物件在什麼場景使用',
'使用族群': '誰會使用這個物件',
'特性': '物件有什麼特徵',
},
en: {
'Materials': 'What materials the object is made of',
'Functions': 'What the object can do',
'Usages': 'In what scenarios the object is used',
'User Groups': 'Who uses this object',
'Characteristics': 'What features the object has',
},
};
export interface AnalyzeResponse {
@@ -92,6 +108,8 @@ export interface StreamAnalyzeRequest {
category_mode?: CategoryMode;
custom_categories?: string[];
suggested_category_count?: number;
// Language setting
lang?: PromptLanguage;
}
export interface StreamProgress {
@@ -161,6 +179,7 @@ export interface TransformationRequest {
model?: string;
temperature?: number;
keyword_count?: number;
lang?: PromptLanguage;
}
export interface TransformationDescription {
@@ -238,11 +257,12 @@ export interface ExpertTransformationRequest {
attributes: string[];
expert_count: number; // 2-8
keywords_per_expert: number; // 1-3
custom_experts?: string[]; // ["藥師", "工程師"]
expert_source?: ExpertSource; // 專家來源 (default: 'llm')
expert_language?: string; // 外部來源語言 (default: 'en')
custom_experts?: string[]; // User-specified experts
expert_source?: ExpertSource; // Expert source (default: 'llm')
expert_language?: string; // External source language (default: 'en')
model?: string;
temperature?: number;
lang?: PromptLanguage; // Prompt language
}
export interface ExpertTransformationProgress {
@@ -272,9 +292,10 @@ export type DeduplicationMethod = 'embedding' | 'llm';
export interface DeduplicationRequest {
descriptions: ExpertTransformationDescription[];
method?: DeduplicationMethod; // 去重方法,default: 'embedding'
similarity_threshold?: number; // 0.0-1.0, default 0.85,僅 embedding 使用
method?: DeduplicationMethod; // Deduplication method, default: 'embedding'
similarity_threshold?: number; // 0.0-1.0, default 0.85, only for embedding
model?: string; // Embedding/LLM model
lang?: PromptLanguage; // Prompt language (for LLM method)
}
export interface DescriptionGroup {
@@ -299,3 +320,127 @@ export interface DeduplicationProgress {
message: string;
error?: string;
}
// ===== Dual-Path types =====
// Identifier for one of the two parallel analysis paths.
export type PathId = 'A' | 'B';
// UI state for a single path: its query, the resulting DAG (null until
// analysis completes), and the in-flight request status.
export interface PathState {
query: string;
result: AttributeDAG | null;
loading: boolean;
progress: DAGProgress;
error: string | null;
}
// Combined state for both paths, keyed by path identifier.
export interface DualPathState {
pathA: PathState;
pathB: PathState;
}
// Progress of a DAG-building pipeline; `error` is set only when step === 'error'.
export interface DAGProgress {
step: 'idle' | 'step0' | 'step1' | 'relationships' | 'done' | 'error';
message: string;
error?: string;
}
// ===== Attribute Crossover types =====
// One candidate pairing of an attribute node from each path.
// `selected` marks whether the user kept this pair for transformation.
export interface CrossoverPair {
id: string;
sourcePathId: PathId;
sourceNode: DAGNode;
targetPathId: PathId;
targetNode: DAGNode;
crossType: string; // 'material-function' | 'function-usage' | etc.
selected: boolean;
}
// Settings controlling which pair types are generated and how many.
export interface CrossoverConfig {
enabledCrossTypes: string[];
maxPairsPerType: number;
autoGenerate: boolean;
}
// Output of pair generation; `totalPairs` mirrors `pairs.length`.
export interface CrossoverResult {
pairs: CrossoverPair[];
totalPairs: number;
}
// ===== Expert Mode types =====
// 'shared': both paths use one expert config; 'independent': one config per path.
export type ExpertMode = 'shared' | 'independent';
// Expert-generation parameters for one path (mirrors the backend request fields).
export interface ExpertConfig {
expert_count: number;
keywords_per_expert: number;
custom_experts?: string[];
expert_source: ExpertSource;
expert_language?: 'en' | 'zh';
}
// Expert configuration for both paths; exactly one of sharedConfig /
// independentConfig is expected to be populated depending on `mode`.
export interface DualPathExpertConfig {
mode: ExpertMode;
sharedConfig?: ExpertConfig;
independentConfig?: {
pathA: ExpertConfig;
pathB: ExpertConfig;
};
}
// ===== Dual-Path Transformation types =====
// Transformation results for each path plus the optional crossover run;
// each slot is null until the corresponding transformation finishes.
export interface DualPathTransformationState {
pathA: ExpertTransformationDAGResult | null;
pathB: ExpertTransformationDAGResult | null;
crossover: CrossoverTransformationResult | null;
}
// Result of transforming the selected crossover pairs with expert perspectives.
export interface CrossoverTransformationResult {
crossoverPairs: CrossoverPair[];
experts: ExpertProfile[];
transformedIdeas: ExpertTransformationDescription[];
}
// ===== Patent Search types =====
// One patent hit returned by the backend patent search service.
// Nullable fields may be absent in the upstream data source.
export interface PatentResult {
publication_number: string;
title: string;
snippet: string;
publication_date: string | null;
assignee: string | null;
inventor: string | null;
status: 'ACTIVE' | 'NOT_ACTIVE' | 'UNKNOWN';
pdf_url: string | null;
thumbnail_url: string | null;
}
// Request body for POST /patent/search (single query).
export interface PatentSearchRequest {
query: string;
max_results?: number;
}
// Response for a single-query search; `error` is set when the search failed.
export interface PatentSearchResponse {
query: string;
total_results: number;
patents: PatentResult[];
error?: string | null;
}
// Request body for POST /patent/search/batch (multiple queries in one call).
export interface BatchPatentSearchRequest {
queries: string[];
max_results_per_query?: number;
}
// Per-query result inside a batch response; same shape as PatentSearchResponse.
export interface BatchPatentSearchResult {
query: string;
total_results: number;
patents: PatentResult[];
error?: string | null;
}
// Response for a batch search; `results` is parallel to the request's `queries`.
export interface BatchPatentSearchResponse {
results: BatchPatentSearchResult[];
total_queries: number;
}

View File

@@ -0,0 +1,197 @@
import type { CrossoverPair, AttributeDAG, CategoryDefinition, DAGNode } from '../types';
/**
 * Result of crossover transformation - two separate DAGs.
 * Each path receives attributes from the other path, so the two DAGs can be
 * fed independently into the per-path expert transformation pipeline.
 */
export interface CrossoverDAGResult {
// Path A receives attributes from Path B
pathA: AttributeDAG;
// Path B receives attributes from Path A
pathB: AttributeDAG;
}
/**
 * Convert selected crossover pairs into two separate AttributeDAGs.
 *
 * For every selected pair (A's attribute × B's attribute):
 * - Path A receives B's attribute, to be transformed within A's context
 * - Path B receives A's attribute, to be transformed within B's context
 *
 * Example: pairing A's "waterproof fabric" (umbrella) with B's "aluminum
 * alloy" (bicycle) lets experts ask "what if the umbrella used aluminum
 * alloy?" (A gets B's attribute) and "what if the bicycle used waterproof
 * fabric?" (B gets A's attribute).
 */
export function crossoverPairsToDAGs(
  pairs: CrossoverPair[],
  dagA: AttributeDAG,
  dagB: AttributeDAG
): CrossoverDAGResult {
  // category -> attribute names each path receives from the other path
  const incomingA = new Map<string, Set<string>>();
  const incomingB = new Map<string, Set<string>>();

  // Insert `name` into the bucket for `category`, creating it on first use.
  const record = (store: Map<string, Set<string>>, category: string, name: string) => {
    let bucket = store.get(category);
    if (!bucket) {
      bucket = new Set<string>();
      store.set(category, bucket);
    }
    bucket.add(name);
  };

  for (const pair of pairs) {
    if (pair.sourcePathId === 'A' && pair.targetPathId === 'B') {
      // source node belongs to A, target node belongs to B
      record(incomingA, pair.targetNode.category, pair.targetNode.name);
      record(incomingB, pair.sourceNode.category, pair.sourceNode.name);
    } else if (pair.sourcePathId === 'B' && pair.targetPathId === 'A') {
      // reversed direction: source node belongs to B, target node belongs to A
      record(incomingA, pair.sourceNode.category, pair.sourceNode.name);
      record(incomingB, pair.targetNode.category, pair.targetNode.name);
    }
  }

  return {
    // Path A's DAG is built from the attributes it received from B, and vice versa.
    pathA: buildCrossoverDAG(dagA.query, incomingA, `Crossover: ${dagB.query}${dagA.query}`),
    pathB: buildCrossoverDAG(dagB.query, incomingB, `Crossover: ${dagA.query}${dagB.query}`),
  };
}
/**
 * Assemble an AttributeDAG whose nodes are the crossover attributes.
 * One category is emitted per map entry, in insertion order; node ids and
 * orders are assigned sequentially across all categories. Edges are empty.
 */
function buildCrossoverDAG(
  originalQuery: string,
  attributesByCategory: Map<string, Set<string>>,
  _crossoverDescription: string
): AttributeDAG {
  const categories: CategoryDefinition[] = [];
  const nodes: DAGNode[] = [];
  let nextNodeOrder = 0;

  Array.from(attributesByCategory.entries()).forEach(([categoryName, attrs], catOrder) => {
    categories.push({
      name: categoryName,
      description: `Crossover attributes for ${categoryName}`,
      is_fixed: false,
      order: catOrder,
    });
    for (const attrName of attrs) {
      nodes.push({
        id: `crossover-${nextNodeOrder}`,
        name: attrName,
        category: categoryName,
        order: nextNodeOrder,
      });
      nextNodeOrder++;
    }
  });

  return {
    query: originalQuery,
    categories,
    nodes,
    edges: [],
  };
}
/**
 * Legacy function - converts to single DAG (deprecated)
 * Kept for backwards compatibility
 */
export function crossoverPairsToDAG(
  pairs: CrossoverPair[],
  queryA: string,
  queryB: string
): AttributeDAG {
  // Bucket pairs by crossType; each bucket becomes one category.
  const buckets = new Map<string, CrossoverPair[]>();
  for (const pair of pairs) {
    const existing = buckets.get(pair.crossType);
    if (existing) {
      existing.push(pair);
    } else {
      buckets.set(pair.crossType, [pair]);
    }
  }

  const categories: CategoryDefinition[] = [];
  const nodes: DAGNode[] = [];
  let categoryOrder = 0;
  let nodeOrder = 0;

  // Walk buckets in insertion order, emitting a category and its "src × tgt" nodes.
  for (const [crossType, typePairs] of buckets) {
    const categoryName = formatCrossTypeName(crossType);
    categories.push({
      name: categoryName,
      description: getCrossTypeDescription(crossType),
      is_fixed: false,
      order: categoryOrder,
    });
    categoryOrder++;

    for (const pair of typePairs) {
      nodes.push({
        id: `crossover-${nodeOrder}`,
        name: `${pair.sourceNode.name} × ${pair.targetNode.name}`,
        category: categoryName,
        order: nodeOrder,
      });
      nodeOrder++;
    }
  }

  return {
    query: `${queryA} × ${queryB}`,
    categories,
    nodes,
    edges: [],
  };
}
// Human-readable category label for a crossType id such as "same-Materials"
// or "cross-Materials-Functions"; unrecognized ids pass through unchanged.
function formatCrossTypeName(crossType: string): string {
  const SAME = 'same-';
  const CROSS = 'cross-';

  if (crossType.startsWith(SAME)) {
    return `Same: ${crossType.slice(SAME.length)}`;
  }

  if (crossType.startsWith(CROSS)) {
    const segments = crossType.slice(CROSS.length).split('-');
    if (segments.length >= 2) {
      const [first, ...rest] = segments;
      return `Cross: ${first} × ${rest.join('-')}`;
    }
  }

  return crossType;
}
// Short description for a crossType id; used as the generated category's description.
function getCrossTypeDescription(crossType: string): string {
  if (crossType.startsWith('same-')) {
    return `Crossover pairs from the same category: ${crossType.replace('same-', '')}`;
  }
  return crossType.startsWith('cross-')
    ? `Crossover pairs from different categories`
    : `Crossover pairs`;
}

38
research/README.md Normal file
View File

@@ -0,0 +1,38 @@
# Research: Expert-Augmented LLM Ideation
This folder contains research materials for the academic paper on the novelty-seeking system.
## Files
| File | Description |
|------|-------------|
| `literature_review.md` | Comprehensive literature review covering semantic distance theory, conceptual blending, design fixation, LLM limitations, and related work |
| `references.md` | 55+ academic references with links to papers |
| `theoretical_framework.md` | The "Semantic Gravity" theoretical model and testable hypotheses |
| `paper_outline.md` | Complete paper structure, experimental design, and target venues |
## Key Theoretical Contribution
**"Semantic Gravity"**: LLMs exhibit a tendency to generate outputs clustered around high-probability regions of their training distribution, limiting creative novelty. Expert perspectives provide "escape velocity" to break free from this gravity.
## Core Hypotheses
1. **H1**: Multi-expert generation → higher semantic diversity
2. **H2**: Multi-expert generation → lower patent overlap (higher novelty)
3. **H3**: Diversity increases with expert count (diminishing returns ~4-6)
4. **H4**: Expert source affects unconventionality of ideas
## Target Venues
- **CHI** (ACM Conference on Human Factors in Computing Systems)
- **CSCW** (ACM Conference on Computer-Supported Cooperative Work)
- **Creativity & Cognition** (ACM Conference)
- **IJHCS** (International Journal of Human-Computer Studies)
## Next Steps
1. Design concrete experiment protocol
2. Add measurement code to existing system
3. Collect experimental data
4. Conduct human evaluation
5. Write and submit paper

View File

@@ -0,0 +1,555 @@
# Experimental Protocol: Expert-Augmented LLM Ideation
## Executive Summary
This document outlines a comprehensive experimental design to test the hypothesis that multi-expert LLM-based ideation produces more diverse and novel ideas than direct LLM generation.
---
## 1. Research Questions
| ID | Research Question |
|----|-------------------|
| **RQ1** | Does multi-expert generation produce higher semantic diversity than direct LLM generation? |
| **RQ2** | Does multi-expert generation produce ideas with lower patent overlap (higher novelty)? |
| **RQ3** | What is the optimal number of experts for maximizing diversity? |
| **RQ4** | How do different expert sources (LLM vs Curated vs DBpedia) affect idea quality? |
| **RQ5** | Does structured attribute decomposition enhance the multi-expert effect? |
---
## 2. Experimental Design Overview
### 2.1 Design Type
**Mixed Design**: Between-subjects for main conditions × Within-subjects for queries
### 2.2 Variables
#### Independent Variables (Manipulated)
| Variable | Levels | Your System Parameter |
|----------|--------|----------------------|
| **Generation Method** | 5 levels (see conditions) | Condition-dependent |
| **Expert Count** | 1, 2, 4, 6, 8 | `expert_count` |
| **Expert Source** | LLM, Curated, DBpedia | `expert_source` |
| **Attribute Structure** | With/Without decomposition | Pipeline inclusion |
#### Dependent Variables (Measured)
| Variable | Measurement Method |
|----------|-------------------|
| **Semantic Diversity** | Mean pairwise cosine distance (embeddings) |
| **Cluster Spread** | Number of clusters, silhouette score |
| **Patent Novelty** | 1 - (ideas with patent match / total ideas) |
| **Semantic Distance** | Distance from query centroid |
| **Human Novelty Rating** | 7-point Likert scale |
| **Human Usefulness Rating** | 7-point Likert scale |
| **Human Creativity Rating** | 7-point Likert scale |
#### Control Variables (Held Constant)
| Variable | Fixed Value |
|----------|-------------|
| LLM Model | Qwen3:8b (or specify) |
| Temperature | 0.7 |
| Total Ideas per Query | 20 |
| Keywords per Expert | 1 |
| Deduplication | Disabled for raw comparison |
| Language | English (for patent search) |
---
## 3. Experimental Conditions
### 3.1 Main Study: Generation Method Comparison
| Condition | Description | Implementation |
|-----------|-------------|----------------|
| **C1: Direct** | Direct LLM generation | Prompt: "Generate 20 creative ideas for [query]" |
| **C2: Single-Expert** | 1 expert × 20 ideas | `expert_count=1`, `keywords_per_expert=20` |
| **C3: Multi-Expert-4** | 4 experts × 5 ideas each | `expert_count=4`, `keywords_per_expert=5` |
| **C4: Multi-Expert-8** | 8 experts × 2-3 ideas each | `expert_count=8`, `keywords_per_expert=2-3` |
| **C5: Random-Perspective** | 4 random words as "perspectives" | Custom prompt with random nouns |
### 3.2 Expert Count Study
| Condition | Expert Count | Ideas per Expert |
|-----------|--------------|------------------|
| **E1** | 1 | 20 |
| **E2** | 2 | 10 |
| **E4** | 4 | 5 |
| **E6** | 6 | 3-4 |
| **E8** | 8 | 2-3 |
### 3.3 Expert Source Study
| Condition | Source | Implementation |
|-----------|--------|----------------|
| **S-LLM** | LLM-generated | `expert_source=ExpertSource.LLM` |
| **S-Curated** | Curated 210 occupations | `expert_source=ExpertSource.CURATED` |
| **S-DBpedia** | DBpedia 2164 occupations | `expert_source=ExpertSource.DBPEDIA` |
| **S-Random** | Random word "experts" | Custom implementation |
---
## 4. Query Dataset
### 4.1 Design Principles
- **Diversity**: Cover multiple domains (consumer products, technology, services, abstract concepts)
- **Complexity Variation**: Simple objects to complex systems
- **Familiarity Variation**: Common items to specialized equipment
- **Cultural Neutrality**: Concepts understandable across cultures
### 4.2 Query Set (30 Queries)
#### Category A: Everyday Objects (10)
| ID | Query | Complexity |
|----|-------|------------|
| A1 | Chair | Low |
| A2 | Umbrella | Low |
| A3 | Backpack | Low |
| A4 | Coffee mug | Low |
| A5 | Bicycle | Medium |
| A6 | Refrigerator | Medium |
| A7 | Smartphone | Medium |
| A8 | Running shoes | Medium |
| A9 | Kitchen knife | Low |
| A10 | Desk lamp | Low |
#### Category B: Technology & Tools (10)
| ID | Query | Complexity |
|----|-------|------------|
| B1 | Solar panel | Medium |
| B2 | Electric vehicle | High |
| B3 | 3D printer | High |
| B4 | Drone | Medium |
| B5 | Smart thermostat | Medium |
| B6 | Noise-canceling headphones | Medium |
| B7 | Water purifier | Medium |
| B8 | Wind turbine | High |
| B9 | Robotic vacuum | Medium |
| B10 | Wearable fitness tracker | Medium |
#### Category C: Services & Systems (10)
| ID | Query | Complexity |
|----|-------|------------|
| C1 | Food delivery service | Medium |
| C2 | Online education platform | High |
| C3 | Healthcare appointment system | High |
| C4 | Public transportation | High |
| C5 | Hotel booking system | Medium |
| C6 | Personal finance app | Medium |
| C7 | Grocery shopping experience | Medium |
| C8 | Parking solution | Medium |
| C9 | Elderly care service | High |
| C10 | Waste management system | High |
### 4.3 Sample Size Justification
Based on [CHI meta-study on effect sizes](https://dl.acm.org/doi/10.1145/3706598.3713671):
- **Queries**: 30 (crossed with conditions)
- **Expected effect size**: d = 0.5 (medium)
- **Power target**: 80%
- **For automatic metrics**: 30 queries × 5 conditions × 20 ideas = 3,000 ideas
- **For human evaluation**: Subset of 10 queries × 3 conditions × 20 ideas = 600 ideas
---
## 5. Automatic Metrics Collection
### 5.1 Semantic Diversity Metrics
#### 5.1.1 Mean Pairwise Distance (Primary)
```python
def compute_mean_pairwise_distance(ideas: List[str], embedding_model: str) -> float:
"""
Compute mean cosine distance between all idea pairs.
Higher = more diverse.
"""
embeddings = get_embeddings(ideas, model=embedding_model)
n = len(embeddings)
distances = []
for i in range(n):
for j in range(i+1, n):
dist = 1 - cosine_similarity(embeddings[i], embeddings[j])
distances.append(dist)
return np.mean(distances), np.std(distances)
```
#### 5.1.2 Cluster Analysis
```python
def compute_cluster_metrics(ideas: List[str], embedding_model: str) -> dict:
"""
Analyze idea clustering patterns.
"""
embeddings = get_embeddings(ideas, model=embedding_model)
# Find optimal k using silhouette score
silhouette_scores = []
for k in range(2, min(len(ideas), 10)):
kmeans = KMeans(n_clusters=k)
labels = kmeans.fit_predict(embeddings)
score = silhouette_score(embeddings, labels)
silhouette_scores.append((k, score))
best_k = max(silhouette_scores, key=lambda x: x[1])[0]
return {
'optimal_clusters': best_k,
'silhouette_score': max(silhouette_scores, key=lambda x: x[1])[1],
'cluster_distribution': compute_cluster_sizes(embeddings, best_k)
}
```
#### 5.1.3 Semantic Distance from Query
```python
def compute_query_distance(query: str, ideas: List[str], embedding_model: str) -> dict:
"""
Measure how far ideas are from the original query.
Higher = more novel/distant.
"""
query_emb = get_embedding(query, model=embedding_model)
idea_embs = get_embeddings(ideas, model=embedding_model)
distances = [1 - cosine_similarity(query_emb, e) for e in idea_embs]
return {
'mean_distance': np.mean(distances),
'max_distance': np.max(distances),
'min_distance': np.min(distances),
'std_distance': np.std(distances)
}
```
### 5.2 Patent Novelty Metrics
#### 5.2.1 Patent Overlap Rate
```python
def compute_patent_novelty(ideas: List[str], query: str) -> dict:
"""
Search patents for each idea and compute overlap rate.
Uses existing patent_search_service.
"""
matches = 0
match_details = []
for idea in ideas:
result = patent_search_service.search(idea)
if result.has_match:
matches += 1
match_details.append({
'idea': idea,
'patent': result.best_match
})
return {
'novelty_rate': 1 - (matches / len(ideas)),
'match_count': matches,
'total_ideas': len(ideas),
'match_details': match_details
}
```
### 5.3 Metrics Summary Table
| Metric | Formula | Interpretation |
|--------|---------|----------------|
| **Mean Pairwise Distance** | avg(1 - cos_sim(i, j)) for all pairs | Higher = more diverse |
| **Silhouette Score** | Cluster cohesion vs separation | Higher = clearer clusters |
| **Optimal Cluster Count** | argmax(silhouette) | More clusters = more themes |
| **Query Distance** | 1 - cos_sim(query, idea) | Higher = farther from original |
| **Patent Novelty Rate** | 1 - (matches / total) | Higher = more novel |
---
## 6. Human Evaluation Protocol
### 6.1 Participants
#### 6.1.1 Recruitment
- **Platform**: Prolific, MTurk, or domain experts
- **Sample Size**: 60 evaluators (20 per condition group)
- **Criteria**:
- Native English speakers
- Bachelor's degree or higher
- Attention check pass rate > 80%
#### 6.1.2 Compensation
- $15/hour equivalent
- ~30 minutes per session
- Bonus for high-quality ratings
### 6.2 Rating Scales
#### 6.2.1 Novelty (7-point Likert)
```
How novel/surprising is this idea?
1 = Not at all novel (very common/obvious)
4 = Moderately novel
7 = Extremely novel (never seen before)
```
#### 6.2.2 Usefulness (7-point Likert)
```
How useful/practical is this idea?
1 = Not at all useful (impractical)
4 = Moderately useful
7 = Extremely useful (highly practical)
```
#### 6.2.3 Creativity (7-point Likert)
```
How creative is this idea overall?
1 = Not at all creative
4 = Moderately creative
7 = Extremely creative
```
### 6.3 Procedure
1. **Introduction** (5 min)
- Study purpose (without revealing hypotheses)
- Rating scale explanation
- Practice with 3 example ideas
2. **Training** (5 min)
- Rate 5 calibration ideas with feedback
- Discuss edge cases
3. **Main Evaluation** (20 min)
- Rate 30 ideas (randomized order)
- 3 attention check items embedded
- Break after 15 ideas
4. **Debriefing** (2 min)
- Demographics
- Open-ended feedback
### 6.4 Quality Control
| Check | Threshold | Action |
|-------|-----------|--------|
| Attention checks | < 2/3 correct | Exclude |
| Completion time | < 10 min | Flag for review |
| Variance in ratings | All same score | Exclude |
| Inter-rater reliability | Cronbach's α < 0.7 | Review ratings |
### 6.5 Analysis Plan
#### 6.5.1 Reliability
- Cronbach's alpha for each scale
- ICC (Intraclass Correlation) for inter-rater agreement
#### 6.5.2 Main Analysis
- Mixed-effects ANOVA: Condition × Query
- Post-hoc: Tukey HSD for pairwise comparisons
- Effect sizes: Cohen's d
#### 6.5.3 Correlation with Automatic Metrics
- Pearson correlation: Human ratings vs semantic diversity
- Regression: Predict human ratings from automatic metrics
---
## 7. Experimental Procedure
### 7.1 Phase 1: Idea Generation
```
For each query Q in QuerySet:
For each condition C in Conditions:
If C == "Direct":
ideas = direct_llm_generation(Q, n=20)
Elif C == "Single-Expert":
expert = generate_expert(Q, n=1)
ideas = expert_transformation(Q, expert, ideas_per_expert=20)
Elif C == "Multi-Expert-4":
experts = generate_experts(Q, n=4)
ideas = expert_transformation(Q, experts, ideas_per_expert=5)
Elif C == "Multi-Expert-8":
experts = generate_experts(Q, n=8)
ideas = expert_transformation(Q, experts, ideas_per_expert=2-3)
Elif C == "Random-Perspective":
perspectives = random.sample(RANDOM_WORDS, 4)
ideas = perspective_generation(Q, perspectives, ideas_per=5)
Store(Q, C, ideas)
```
### 7.2 Phase 2: Automatic Metrics
```
For each (Q, C, ideas) in Results:
metrics = {
'diversity': compute_mean_pairwise_distance(ideas),
'clusters': compute_cluster_metrics(ideas),
'query_distance': compute_query_distance(Q, ideas),
'patent_novelty': compute_patent_novelty(ideas, Q)
}
Store(Q, C, metrics)
```
### 7.3 Phase 3: Human Evaluation
```
# Sample selection
selected_queries = random.sample(QuerySet, 10)
selected_conditions = ["Direct", "Multi-Expert-4", "Multi-Expert-8"]
# Create evaluation set
evaluation_items = []
For each Q in selected_queries:
For each C in selected_conditions:
ideas = Get(Q, C)
For each idea in ideas:
evaluation_items.append((Q, C, idea))
# Randomize and assign to evaluators
random.shuffle(evaluation_items)
assignments = assign_to_evaluators(evaluation_items, n_evaluators=60)
# Collect ratings
ratings = collect_human_ratings(assignments)
```
### 7.4 Phase 4: Analysis
```
# Automatic metrics analysis
Run ANOVA: diversity ~ condition + query + condition:query
Run post-hoc: Tukey HSD for condition pairs
Compute effect sizes
# Human ratings analysis
Check reliability: Cronbach's alpha, ICC
Run mixed-effects model: rating ~ condition + (1|evaluator) + (1|query)
Compute correlations: human vs automatic metrics
# Visualization
Plot: Diversity by condition (box plots)
Plot: t-SNE of idea embeddings colored by condition
Plot: Expert count vs diversity curve
```
---
## 8. Implementation Checklist
### 8.1 Code to Implement
- [ ] `experiments/generate_ideas.py` - Idea generation for all conditions
- [ ] `experiments/compute_metrics.py` - Automatic metric computation
- [ ] `experiments/export_for_evaluation.py` - Prepare human evaluation set
- [ ] `experiments/analyze_results.py` - Statistical analysis
- [ ] `experiments/visualize.py` - Generate figures
### 8.2 Data Files to Create
- [ ] `data/queries.json` - 30 queries with metadata
- [ ] `data/random_words.json` - Random perspective words
- [ ] `data/generated_ideas/` - Raw idea outputs
- [ ] `data/metrics/` - Computed metric results
- [ ] `data/human_ratings/` - Collected ratings
### 8.3 Analysis Outputs
- [ ] `results/diversity_by_condition.csv`
- [ ] `results/patent_novelty_by_condition.csv`
- [ ] `results/human_ratings_summary.csv`
- [ ] `results/statistical_tests.txt`
- [ ] `figures/` - All visualizations
---
## 9. Expected Results & Hypotheses
### 9.1 Primary Hypotheses
| Hypothesis | Prediction | Metric |
|------------|------------|--------|
| **H1** | Multi-Expert-4 > Single-Expert > Direct | Semantic diversity |
| **H2** | Multi-Expert-8 ≈ Multi-Expert-4 (diminishing returns) | Semantic diversity |
| **H3** | Multi-Expert > Direct | Patent novelty rate |
| **H4** | LLM experts > Curated > DBpedia | Unconventionality |
| **H5** | With attributes > Without attributes | Overall diversity |
### 9.2 Expected Effect Sizes
Based on related work:
- Diversity increase: d = 0.5-0.8 (medium to large)
- Patent novelty increase: 20-40% improvement
- Human creativity rating: d = 0.3-0.5 (small to medium)
### 9.3 Potential Confounds
| Confound | Mitigation |
|----------|-----------|
| Query difficulty | Crossed design (all queries × all conditions) |
| LLM variability | Multiple runs, fixed seed where possible |
| Evaluator bias | Randomized presentation, blinding |
| Order effects | Counterbalancing in human evaluation |
---
## 10. Timeline
| Week | Activity |
|------|----------|
| 1-2 | Implement idea generation scripts |
| 3 | Generate all ideas (5 conditions × 30 queries) |
| 4 | Compute automatic metrics |
| 5 | Design and pilot human evaluation |
| 6-7 | Run human evaluation (60 participants) |
| 8 | Analyze results |
| 9-10 | Write paper |
| 11 | Internal review |
| 12 | Submit |
---
## 11. Appendix: Direct Generation Prompt
For baseline condition C1 (Direct LLM generation):
```
You are a creative innovation consultant. Generate 20 unique and creative ideas
for improving or reimagining a [QUERY].
Requirements:
- Each idea should be distinct and novel
- Ideas should range from incremental improvements to radical innovations
- Consider different aspects: materials, functions, user experiences, contexts
- Provide a brief (15-30 word) description for each idea
Output format:
1. [Idea keyword]: [Description]
2. [Idea keyword]: [Description]
...
20. [Idea keyword]: [Description]
```
---
## 12. Appendix: Random Perspective Words
For condition C5 (Random-Perspective), sample from:
```json
[
"ocean", "mountain", "forest", "desert", "cave",
"microscope", "telescope", "kaleidoscope", "prism", "lens",
"butterfly", "elephant", "octopus", "eagle", "ant",
"sunrise", "thunderstorm", "rainbow", "fog", "aurora",
"clockwork", "origami", "mosaic", "symphony", "ballet",
"ancient", "futuristic", "organic", "crystalline", "liquid",
"whisper", "explosion", "rhythm", "silence", "echo"
]
```
This tests whether ANY perspective shift helps, or if EXPERT perspectives specifically matter.

View File

@@ -0,0 +1,209 @@
# Literature Review: Expert-Augmented LLM Ideation
## 1. Core Directly-Related Work
### 1.1 Wisdom of Crowds via Role Assumption
**Bringing the Wisdom of the Crowd to an Individual by Having the Individual Assume Different Roles** (ACM C&C 2017)
Groups of people tend to generate more diverse ideas than individuals because each group member brings a different perspective. This study showed it's possible to help individuals think more like a group by asking them to approach a problem from different perspectives. In an experiment with 54 crowd workers, participants who assumed different expert roles came up with more creative ideas than those who did not.
**Gap for our work**: This was human-based role-playing. Our system automates this with LLM-powered expert perspectives.
### 1.2 PersonaFlow: LLM-Simulated Expert Perspectives
**PersonaFlow: Designing LLM-Simulated Expert Perspectives for Enhanced Research Ideation** (2024)
PersonaFlow provides multiple perspectives by using LLMs to simulate domain-specific experts. User studies showed it increased the perceived relevance and creativity of ideated research directions and promoted users' critical thinking activities without increasing perceived cognitive load.
**Gap for our work**: PersonaFlow focuses on research ideation. Our system applies to product/innovation ideation with structured attribute decomposition.
### 1.3 PopBlends: Conceptual Blending with LLMs
**PopBlends: Strategies for Conceptual Blending with Large Language Models** (CHI 2023)
PopBlends automatically suggests conceptual blends using both traditional knowledge extraction and LLMs. Studies showed people found twice as many blend suggestions with the system, with half the mental demand.
**Gap for our work**: We structure blending through expert domain knowledge rather than direct concept pairing.
### 1.4 BILLY: Persona Vector Merging
**BILLY: Steering Large Language Models via Merging Persona Vectors for Creative Generation** (2025)
Proposes fusing persona vectors in activation space to steer LLM output towards multiple perspectives simultaneously, requiring only a single additive operation during inference.
**Gap for our work**: We use sequential multi-expert generation rather than vector fusion, allowing more explicit control and interpretability.
---
## 2. Theoretical Foundations
### 2.1 Semantic Distance Theory
**Core Insight** (Mednick, 1962): Creative thinking involves connecting weakly related, remote concepts in semantic memory. The farther one "moves away" from a conventional idea, the more creative the new idea will likely be.
**Key Research**:
- Semantic distance plays an important role in the creative process
- A more "flexible" semantic memory structure (higher connectivity, shorter distances) facilitates creative idea generation
- Quantitative measures using LSA and semantic networks can objectively examine creative output
- Divergent Semantic Integration (DSI) correlates strongly with human creativity ratings (72% variance explained)
**Application to Our Work**: Expert perspectives force semantic "jumps" to distant domains that LLMs wouldn't naturally traverse.
```
Without Expert: "Chair" → furniture, sitting, comfort (short semantic distance)
With Expert: "Chair" + Marine Biologist → pressure, buoyancy, coral (long semantic distance)
```
### 2.2 Conceptual Blending Theory
**Core Insight** (Fauconnier & Turner, 2002): Creative products emerge from blending elements of two input spaces into a novel integrated space.
**Key Research**:
- Blending process: (1) find connecting concept between inputs, (2) map elements that can be blended
- Generative AI demonstrates ability to blend and integrate concepts (bisociation)
- Trisociation (three-concept blending) is being used for AI-augmented idea generation
- Conceptual blending provides terminology for describing creative products
**Limitation**: Blending theory doesn't explain where inputs originate - the "inspiration problem."
**Application to Our Work**: Each expert provides a distinct "input space" enabling systematic multi-space blending. Our attribute decomposition provides structured inputs for blending.
### 2.3 Design Fixation
**Core Insight** (Jansson & Smith, 1991): Design fixation is "blind adherence to a set of ideas or concepts limiting the output of conceptual design."
**Key Research**:
- Fixation results from categorical knowledge organization around prototypes
- Accessing prototypes requires less cognitive effort than processing exemplars
- Diverse teams, model-making, and facilitation help prevent fixation
- Reflecting on prior fixation episodes is most effective prevention
**Neural Evidence**: fMRI studies show distinct patterns during fixated vs. creative ideation.
**Application to Our Work**: LLMs exhibit "semantic fixation" on high-probability outputs. Expert perspectives break this by forcing activation of non-prototype knowledge.
### 2.4 Constraint-Based Creativity
**Core Insight**: Paradoxically, constraints can enhance creativity by pushing beyond the path of least resistance.
**Key Research**:
- Constraints push people to search for more distant ideas in semantic memory
- Extreme constraints may require different types of creative problem-solving
- Not all constraints promote creativity for all individuals/tasks
- A "constraint-leveraging mindset" can be developed through experience
**Application to Our Work**: Expert role = productive constraint that expands rather than limits creative space. The expert perspective forces exploration of non-obvious solution spaces.
---
## 3. LLM Limitations in Creative Generation
### 3.1 Design Fixation from AI
**The Effects of Generative AI on Design Fixation and Divergent Thinking** (CHI 2024)
Key finding: AI exposure during ideation leads to HIGHER fixation. Participants who used AI produced:
- Fewer ideas
- Less variety
- Lower originality, compared to baseline (no AI assistance)
### 3.2 Dual Mechanisms: Inspiration vs. Fixation
**Inspiration Booster or Creative Fixation?** (Nature Humanities & Social Sciences, 2025)
- LLMs help in **simple** creative tasks (inspiration stimulation)
- LLMs **hurt** in **complex** creative tasks (creative fixation)
**Application to Our Work**: Our structured decomposition manages complexity, while multi-expert approach maintains inspiration benefits.
### 3.3 Statistical Pattern Perpetuation
**Bias and Fairness in Large Language Models: A Survey** (MIT Press, 2024)
LLMs learn, perpetuate, and amplify patterns from training data. This applies to creative outputs - LLMs generate what is statistically common/expected.
### 3.4 Generalization Bias
**Generalization Bias in LLM Summarization** (Royal Society, 2025)
LLMs' overgeneralization tendency produces outputs that lack sufficient empirical support. This suggests a bias toward "safe" middle-ground outputs rather than novel extremes.
---
## 4. Role-Playing and Perspective-Taking
### 4.1 Creativity Enhancement
Research on tabletop role-playing games (TTRPGs) demonstrates:
- Significant positive impact on creativity potential through divergent thinking
- TTRPG players exhibit significantly higher creativity than non-players
- Perspective-taking is closely linked to empathy and cognitive flexibility
### 4.2 Therapeutic and Educational Applications
- Role-playing develops perspective-taking, storytelling, creativity, and self-expression
- Physiological, emotional, and mental well-being from play enables creative ideation
- Play signals psychological safety, which is essential for creativity
### 4.3 Design Research Applications
- Role-playing stimulates creativity by exploring alternative solutions
- Offers safe environment to explore failure modes and challenge assumptions
- Well-suited for early-stage ideation and empathy-critical moments
---
## 5. Creativity Support Tools (CSTs)
### 5.1 Current State
- CSTs primarily support **divergent** thinking
- **Convergent** thinking often neglected
- Ideal CST should offer tailored support for both
### 5.2 AI as Creative Partner
- Collaborative ideation systems expose users to different ideas
- Competing theories on when/whether such exposure helps
- Tool-mediated expert activity view: computers as "mediating artifacts people act through"
### 5.3 Evaluation Methods
**Consensual Assessment Technique (CAT)**:
- Pool of experts independently evaluate artifacts
- Creative if high evaluations + high interrater reliability (Cronbach's alpha > 0.7)
**Semantic Distance Measures**:
- SemDis platform for automated creativity assessment
- Overcomes labor cost and subjectivity of human rating
- Uses NLP to quantify semantic relatedness
---
## 6. Our Theoretical Contribution
### The "Semantic Gravity" Problem
```
Direct LLM Generation:
P(idea | query)
→ Samples from high-probability region
→ Ideas cluster around training distribution modes
→ "Semantic gravity" pulls toward conventional associations
```
### Expert Transformation Solution
```
Conditioned Generation:
P(idea | query, expert)
→ Expert perspective activates distant semantic regions
→ Forces conceptual blending across domains
→ Breaks design fixation through productive constraints
```
### Multi-Expert Aggregation
```
Diverse Experts → Semantic Coverage
→ "Inner crowd" wisdom without actual crowd
→ Systematic exploration of idea space
→ Deduplication ensures non-redundant novelty
```
### Theoretical Model
1. **Attribute Decomposition**: Structures the problem space (categories, attributes)
2. **Expert Perspectives**: Forces semantic jumps to distant domains
3. **Multi-Expert Aggregation**: Achieves crowd-like diversity individually
4. **Deduplication**: Ensures generated ideas are truly distinct
5. **Patent Validation**: Grounds novelty in real-world uniqueness

288
research/paper_outline.md Normal file
View File

@@ -0,0 +1,288 @@
# Paper Outline: Expert-Augmented LLM Ideation
## Suggested Titles
1. **"Breaking Semantic Gravity: Expert-Augmented LLM Ideation for Enhanced Creativity"**
2. "Beyond Interpolation: Multi-Expert Perspectives for Combinatorial Innovation"
3. "Escaping the Relevance Trap: Structured Expert Frameworks for Creative AI"
4. "From Crowd to Expert: Simulating Diverse Perspectives for LLM-Based Ideation"
---
## Abstract (Draft)
Large Language Models (LLMs) are increasingly used for creative ideation, yet they exhibit a phenomenon we term "semantic gravity" - the tendency to generate outputs clustered around high-probability regions of their training distribution. This limits the novelty and diversity of generated ideas. We propose a multi-expert transformation framework that systematically activates diverse semantic regions by conditioning LLM generation on simulated expert perspectives. Our system decomposes concepts into structured attributes, generates ideas through multiple domain-expert viewpoints, and employs semantic deduplication to ensure genuine diversity. Through experiments comparing multi-expert generation against direct LLM generation and single-expert baselines, we demonstrate that our approach produces ideas with [X]% higher semantic diversity and [Y]% lower patent overlap. We contribute a theoretical framework explaining LLM creativity limitations and an open-source system for innovation ideation.
---
## 1. Introduction
### 1.1 The Promise and Problem of LLM Creativity
- LLMs widely adopted for creative tasks
- Initial enthusiasm: infinite idea generation
- Emerging concern: quality and diversity issues
### 1.2 The Semantic Gravity Problem
- Define the phenomenon
- Why it occurs (statistical learning, mode collapse)
- Why it matters (innovation requires novelty)
### 1.3 Our Solution: Expert-Augmented Ideation
- Brief overview of the approach
- Key insight: expert perspectives as semantic "escape velocity"
- Contributions preview
### 1.4 Paper Organization
- Roadmap for the rest of the paper
---
## 2. Related Work
### 2.1 Theoretical Foundations
- Semantic distance and creativity (Mednick, 1962)
- Conceptual blending theory (Fauconnier & Turner)
- Design fixation (Jansson & Smith)
- Constraint-based creativity
### 2.2 LLM Limitations in Creative Generation
- Design fixation from AI (CHI 2024)
- Dual mechanisms: inspiration vs. fixation
- Bias and pattern perpetuation
### 2.3 Persona-Based Prompting
- PersonaFlow (2024)
- BILLY persona vectors (2025)
- Quantifying persona effects (ACL 2024)
### 2.4 Creativity Support Tools
- Wisdom of crowds approaches
- Human-AI collaboration in ideation
- Evaluation methods (CAT, semantic distance)
### 2.5 Positioning Our Work
- Gap: No end-to-end system combining structured decomposition + multi-expert transformation + deduplication
- Distinction from PersonaFlow: product innovation focus, attribute structure
---
## 3. System Design
### 3.1 Overview
- Pipeline diagram
- Design rationale
### 3.2 Attribute Decomposition
- Category analysis (dynamic vs. fixed)
- Attribute generation per category
- DAG relationship mapping
### 3.3 Expert Team Generation
- Expert sources: LLM-generated, curated, external databases
- Diversity optimization strategies
- Domain coverage considerations
### 3.4 Expert Transformation
- Conditioning mechanism
- Keyword generation
- Description generation
- Parallel processing for efficiency
### 3.5 Semantic Deduplication
- Embedding-based approach
- LLM-based approach
- Threshold selection
### 3.6 Novelty Validation
- Patent search integration
- Overlap scoring
---
## 4. Experiments
### 4.1 Research Questions
- RQ1: Does multi-expert generation increase semantic diversity?
- RQ2: Does multi-expert generation reduce patent overlap?
- RQ3: What is the optimal number of experts?
- RQ4: How do expert sources affect output quality?
### 4.2 Experimental Setup
#### 4.2.1 Dataset
- N concepts/queries for ideation
- Selection criteria (diverse domains, complexity levels)
#### 4.2.2 Conditions
| Condition | Description |
|-----------|-------------|
| Baseline | Direct LLM: "Generate 20 creative ideas for X" |
| Single-Expert | 1 expert × 20 ideas |
| Multi-Expert-4 | 4 experts × 5 ideas each |
| Multi-Expert-8 | 8 experts × 2-3 ideas each |
| Random-Perspective | 4 random words as "perspectives" |
#### 4.2.3 Controls
- Same LLM model (specify version)
- Same temperature settings
- Same total idea count per condition
### 4.3 Metrics
#### 4.3.1 Semantic Diversity
- Mean pairwise cosine distance between embeddings
- Cluster distribution analysis
- Silhouette score for idea clustering
#### 4.3.2 Novelty
- Patent overlap rate
- Semantic distance from query centroid
#### 4.3.3 Quality (Human Evaluation)
- Novelty rating (1-7 Likert)
- Usefulness rating (1-7 Likert)
- Creativity rating (1-7 Likert)
- Interrater reliability (Cronbach's alpha)
### 4.4 Procedure
- Idea generation process
- Evaluation process
- Statistical analysis methods
---
## 5. Results
### 5.1 Semantic Diversity (RQ1)
- Quantitative results
- Visualization (t-SNE/UMAP of idea embeddings)
- Statistical significance tests
### 5.2 Patent Novelty (RQ2)
- Overlap rates by condition
- Examples of high-novelty ideas
### 5.3 Expert Count Analysis (RQ3)
- Diversity vs. expert count curve
- Diminishing returns analysis
- Optimal expert count recommendation
### 5.4 Expert Source Comparison (RQ4)
- LLM-generated vs. curated vs. random
- Unconventionality metrics
### 5.5 Human Evaluation Results
- Rating distributions
- Condition comparisons
- Correlation with automatic metrics
---
## 6. Discussion
### 6.1 Interpreting the Results
- Why multi-expert works
- The role of structured decomposition
- Deduplication importance
### 6.2 Theoretical Implications
- Semantic gravity as framework for LLM creativity
- Expert perspectives as productive constraints
- Inner crowd wisdom
### 6.3 Practical Implications
- When to use multi-expert approach
- Expert selection strategies
- Integration with existing workflows
### 6.4 Limitations
- LLM-specific results may not generalize
- Patent overlap as proxy for true novelty
- Human evaluation subjectivity
- Single-language experiments
### 6.5 Future Work
- Cross-cultural creativity
- Domain-specific expert optimization
- Real-world deployment studies
- Integration with other creativity techniques
---
## 7. Conclusion
- Summary of contributions
- Key takeaways
- Broader impact
---
## Appendices
### A. Prompt Templates
- Expert generation prompts
- Keyword generation prompts
- Description generation prompts
### B. Full Experimental Results
- Complete data tables
- Additional visualizations
### C. Expert Source Details
- Curated occupation list
- DBpedia/Wikidata query details
### D. Human Evaluation Protocol
- Instructions for raters
- Example ratings
- Training materials
---
## Target Venues
### Tier 1 (Recommended)
1. **CHI** - ACM Conference on Human Factors in Computing Systems
- Strong fit: creativity support tools, human-AI collaboration
- Deadline: typically September
2. **CSCW** - ACM Conference on Computer-Supported Cooperative Work
- Good fit: collaborative ideation, crowd wisdom
- Deadline: typically April/January
3. **Creativity & Cognition** - ACM Conference
- Perfect fit: computational creativity focus
- Smaller but specialized venue
### Tier 2 (Alternative)
4. **DIS** - ACM Designing Interactive Systems
- Good fit: design ideation tools
5. **UIST** - ACM Symposium on User Interface Software and Technology
- If system/interaction focus emphasized
6. **ICCC** - International Conference on Computational Creativity
- Specialized computational creativity venue
### Journal Options
1. **International Journal of Human-Computer Studies (IJHCS)**
2. **ACM Transactions on Computer-Human Interaction (TOCHI)**
3. **Design Studies**
4. **Creativity Research Journal**
---
## Timeline Checklist
- [ ] Finalize experimental design
- [ ] Collect/select query dataset
- [ ] Run all experimental conditions
- [ ] Compute automatic metrics
- [ ] Design human evaluation study
- [ ] Recruit evaluators
- [ ] Conduct human evaluation
- [ ] Statistical analysis
- [ ] Write first draft
- [ ] Internal review
- [ ] Revision
- [ ] Submit

208
research/references.md Normal file
View File

@@ -0,0 +1,208 @@
# References
## Core Related Work
1. **Siangliulue, P., Arnold, K. C., Gajos, K. Z., & Dow, S. P.** (2017). Bringing the Wisdom of the Crowd to an Individual by Having the Individual Assume Different Roles. *Proceedings of the 2017 ACM SIGCHI Conference on Creativity and Cognition (C&C '17)*, 131-141.
- https://dl.acm.org/doi/10.1145/3059454.3059467
2. **Liu, Y., Sharma, A., et al.** (2024). PersonaFlow: Designing LLM-Simulated Expert Perspectives for Enhanced Research Ideation. *arXiv preprint*.
- https://arxiv.org/html/2409.12538v1
- https://www.semanticscholar.org/paper/PersonaFlow:-Designing-LLM-Simulated-Expert-for-Liu-Sharma/eb0c224be9191e39452f20b2cbb886b5ecc4f57b
3. **Choi, J., et al.** (2023). PopBlends: Strategies for Conceptual Blending with Large Language Models. *Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems*.
- https://dl.acm.org/doi/10.1145/3544548.3580948
4. **BILLY Authors** (2025). BILLY: Steering Large Language Models via Merging Persona Vectors for Creative Generation. *arXiv preprint*.
- https://arxiv.org/html/2510.10157v1
---
## Semantic Distance & Creative Cognition
5. **Mednick, S. A.** (1962). The associative basis of the creative process. *Psychological Review, 69*(3), 220-232.
- (Classic foundational paper)
6. **Kenett, Y. N., & Faust, M.** (2019). Going the Extra Creative Mile: The Role of Semantic Distance in Creativity Theory, Research, and Measurement. *The Cambridge Handbook of the Neuroscience of Creativity*.
- https://www.cambridge.org/core/books/abs/cambridge-handbook-of-the-neuroscience-of-creativity/going-the-extra-creative-mile-the-role-of-semantic-distance-in-creativity-theory-research-and-measurement/3AD9143E69A463F85F2D8CC8940425CA
7. **Beaty, R. E., & Johnson, D. R.** (2021). Automating creativity assessment with SemDis: An open platform for computing semantic distance. *Behavior Research Methods, 53*, 757-780.
- https://link.springer.com/article/10.3758/s13428-020-01453-w
8. **What can quantitative measures of semantic distance tell us about creativity?** (2018). *Current Opinion in Behavioral Sciences*.
- https://www.sciencedirect.com/science/article/abs/pii/S2352154618301098
9. **Semantic Memory and Creativity: The Costs and Benefits of Semantic Memory Structure in Generating Original Ideas** (2023). *PMC*.
- https://pmc.ncbi.nlm.nih.gov/articles/PMC10128864/
10. **The Role of Semantic Associations as a Metacognitive Cue in Creative Idea Generation** (2023). *PMC*.
- https://pmc.ncbi.nlm.nih.gov/articles/PMC10141130/
---
## Conceptual Blending Theory
11. **Fauconnier, G., & Turner, M.** (2002). *The Way We Think: Conceptual Blending and the Mind's Hidden Complexities*. Basic Books.
12. **Conceptual Blending** - Wikipedia Overview
- https://en.wikipedia.org/wiki/Conceptual_blending
13. **Pereira, F. C.** (2007). *Creativity and Artificial Intelligence: A Conceptual Blending Approach*. Mouton de Gruyter.
- https://dl.acm.org/doi/10.5555/1557446
- https://www.researchgate.net/publication/332711522_Creativity_and_Artificial_Intelligence_A_Conceptual_Blending_Approach
14. **Confalonieri, R., et al.** (2018). A computational framework for conceptual blending. *Artificial Intelligence, 256*, 105-129.
- https://www.sciencedirect.com/science/article/pii/S000437021730142X
15. **Trisociation with AI for Creative Idea Generation** (2025). *California Management Review*.
- https://cmr.berkeley.edu/2025/01/trisociation-with-ai-for-creative-idea-generation/
---
## Design Fixation & Constraint-Based Creativity
16. **Jansson, D. G., & Smith, S. M.** (1991). Design fixation. *Design Studies, 12*(1), 3-11.
- (Classic foundational paper)
17. **Design Fixation: A Cognitive Model**. *Design Society*.
- https://www.designsociety.org/download-publication/25504/design_fixation_a_cognitive_model
18. **Crilly, N.** (2019). Research Design Fixation. *Cambridge Repository*.
- https://www.repository.cam.ac.uk/bitstreams/2c002015-8771-4694-ad48-0e4b52008bdf/download
19. **Using fMRI to deepen our understanding of design fixation** (2020). *Design Science, Cambridge Core*.
- https://www.cambridge.org/core/journals/design-science/article/using-fmri-to-deepen-our-understanding-of-design-fixation/2DD81FEE8ED682F6DFF415BF2948EFA6
20. **Acar, O. A., Tarakci, M., & van Knippenberg, D.** (2019). Creativity and Innovation Under Constraints: A Cross-Disciplinary Integrative Review. *Journal of Management, 45*(1), 96-121.
- https://journals.sagepub.com/doi/full/10.1177/0149206318805832
21. **Cromwell, J. R.** (2024). How combinations of constraint affect creativity: A new typology of creative problem solving in organizations. *Organizational Psychology Review*.
- https://journals.sagepub.com/doi/10.1177/20413866231202031
22. **Creativity from constraints: Theory and applications to education** (2022). *Thinking Skills and Creativity*.
- https://www.sciencedirect.com/science/article/abs/pii/S1871187122001870
---
## LLM Limitations in Creative Generation
23. **Wadinambiarachchi, S., et al.** (2024). The Effects of Generative AI on Design Fixation and Divergent Thinking. *Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems*.
- https://dl.acm.org/doi/full/10.1145/3613904.3642919
- https://arxiv.org/html/2403.11164v1
24. **Inspiration booster or creative fixation? The dual mechanisms of LLMs in shaping individual creativity in tasks of different complexity** (2025). *Humanities and Social Sciences Communications (Nature)*.
- https://www.nature.com/articles/s41599-025-05867-9
25. **Gallegos, I. O., et al.** (2024). Bias and Fairness in Large Language Models: A Survey. *Computational Linguistics, 50*(3), 1097-1179. MIT Press.
- https://direct.mit.edu/coli/article/50/3/1097/121961/Bias-and-Fairness-in-Large-Language-Models-A
26. **Generalization bias in large language model summarization of scientific research** (2025). *Royal Society Open Science, 12*(4).
- https://royalsocietypublishing.org/rsos/article/12/4/241776/235656/Generalization-bias-in-large-language-model
27. **LLLMs: A Data-Driven Survey of Evolving Research on Limitations of Large Language Models** (2025). *arXiv*.
- https://arxiv.org/html/2505.19240v1
---
## Persona Prompting & Multi-Agent Systems
28. **Quantifying the Persona Effect in LLM Simulations** (2024). *ACL 2024*.
- https://aclanthology.org/2024.acl-long.554.pdf
- https://www.emergentmind.com/topics/persona-effect-in-llm-simulations
29. **Two Tales of Persona in LLMs: A Survey of Role-Playing** (2024). *EMNLP Findings*.
- https://aclanthology.org/2024.findings-emnlp.969.pdf
30. **LLM Generated Persona is a Promise with a Catch** (2024). *Semantic Scholar*.
- https://www.semanticscholar.org/paper/LLM-Generated-Persona-is-a-Promise-with-a-Catch-Li-Chen/3ea29481ec11d1568fde727d236f71e44e4e2ad0
31. **Using AI for User Representation: An Analysis of 83 Persona Prompts** (2025). *arXiv*.
- https://arxiv.org/html/2508.13047v1
32. **Scaffolding Creativity: How Divergent and Convergent Personas Shape AI-Assisted Ideation** (2025). *arXiv*.
- https://arxiv.org/pdf/2510.26490
---
## Role-Playing & Perspective-Taking
33. **Chung, T. S.** (2013). Table-top role playing game and creativity. *Thinking Skills and Creativity, 8*, 56-71.
- https://www.researchgate.net/publication/257701334_Table-top_role_playing_game_and_creativity
34. **The effect of tabletop role-playing games on the creative potential and emotional creativity of Taiwanese college students** (2015). *Thinking Skills and Creativity*.
- https://www.researchgate.net/publication/284013184_The_effect_of_tabletop_role-playing_games_on_the_creative_potential_and_emotional_creativity_of_Taiwanese_college_students
35. **Psychology and Role-Playing Games** (2019). *ResearchGate*.
- https://www.researchgate.net/publication/331758159_Psychology_and_Role-Playing_Games
36. **Role Playing and Perspective Taking: An Educational Point of View** (2020). *ResearchGate*.
- https://www.researchgate.net/publication/346610467_Role_Playing_and_Perspective_Taking_An_Educational_Point_of_View
---
## Creativity Support Tools & Evaluation
37. **Jordanous, A.** (2018). Evaluating Computational Creativity: An Interdisciplinary Tutorial. *ACM Computing Surveys, 51*(2), Article 28.
- https://dl.acm.org/doi/10.1145/3167476
38. **Evaluating Creativity in Computational Co-Creative Systems** (2018). *ResearchGate*.
- https://www.researchgate.net/publication/326646917_Evaluating_Creativity_in_Computational_Co-Creative_Systems
39. **The Intersection of Users, Roles, Interactions, and Technologies in Creativity Support Tools** (2021). *DIS '21*.
- https://dl.acm.org/doi/10.1145/3461778.3462050
40. **What Counts as 'Creative' Work? Articulating Four Epistemic Positions in Creativity-Oriented HCI Research** (2024). *CHI '24*.
- https://dl.acm.org/doi/10.1145/3613904.3642854
41. **Colton, S., & Wiggins, G. A.** (2012). Computational Creativity: The Final Frontier? *ECAI 2012*.
    - https://link.springer.com/article/10.1007/s00354-020-00116-w (NOTE: this link resolves to a *New Generation Computing* journal article, not the ECAI 2012 proceedings — verify and replace with the correct proceedings link)
---
## AI-Augmented Design & Ideation
42. **The effect of AI-based inspiration on human design ideation** (2023). *CoDesign*.
- https://www.tandfonline.com/doi/full/10.1080/21650349.2023.2167124
43. **A Hybrid Prototype Method Combining Physical Models and Generative AI to Support Creativity in Conceptual Design** (2024). *ACM TOCHI*.
- https://dl.acm.org/doi/10.1145/3689433
44. **Artificial intelligence for design education: A conceptual approach to enhance students' divergent and convergent thinking** (2025). *IJTDE*.
- https://link.springer.com/article/10.1007/s10798-025-09964-3
45. **The Ideation Compass: Supporting interdisciplinary creative dialogues with real time visualization** (2022). *CoDesign*.
- https://www.tandfonline.com/doi/full/10.1080/21650349.2022.2142674
46. **Guiding data-driven design ideation by knowledge distance** (2021). *Knowledge-Based Systems*.
- https://www.sciencedirect.com/science/article/abs/pii/S0950705121001362
---
## CHI/CSCW Related Papers
47. **Chan, J., Dang, S., & Dow, S. P.** (2016). Improving Crowd Innovation with Expert Facilitation. *CSCW '16*.
48. **Koch, J., et al.** (2020). ImageSense: An Intelligent Collaborative Ideation Tool to Support Diverse Human-Computer Partnerships. *CSCW '20*.
49. **Yu, L., Kittur, A., & Kraut, R. E.** (2014). Distributed Analogical Idea Generation: Inventing with Crowds. *CHI '14*.
50. **Crowdboard** (2017). *C&C '17*.
- https://dl.acm.org/doi/10.1145/3059454.3059477
51. **Collaborative Creativity** (2011). *CHI '11*.
- https://dl.acm.org/doi/10.1145/1978942.1979214
52. **Beyond Automation: How UI/UX Designers Perceive AI as a Creative Partner in the Divergent Thinking Stages** (2025). *arXiv*.
- https://arxiv.org/html/2501.18778
---
## Additional Resources
53. **Automatic Scoring of Metaphor Creativity with Large Language Models** (2024). *Creativity Research Journal*.
- https://www.tandfonline.com/doi/full/10.1080/10400419.2024.2326343
54. **Wisdom of Crowds** - Surowiecki, J. (2004). *The Wisdom of Crowds*. Doubleday.
- https://en.wikipedia.org/wiki/The_Wisdom_of_Crowds
55. **Research: When Used Correctly, LLMs Can Unlock More Creative Ideas** (2025). *Harvard Business Review*.
- https://hbr.org/2025/12/research-when-used-correctly-llms-can-unlock-more-creative-ideas

---
# Theoretical Framework: Expert-Augmented LLM Ideation
## The Core Problem: LLM "Semantic Gravity"
### What is Semantic Gravity?
When LLMs generate creative ideas directly, they exhibit a phenomenon we term "semantic gravity" - the tendency to generate outputs that cluster around high-probability regions of their training distribution.
```
Direct LLM Generation:
Input: "Generate creative ideas for a chair"
LLM Process:
P(idea | "chair") → samples from training distribution
Result:
- "Ergonomic office chair" (high probability)
- "Foldable portable chair" (high probability)
- "Eco-friendly bamboo chair" (moderate probability)
Problem:
→ Ideas cluster in predictable semantic neighborhoods
→ Limited exploration of distant conceptual spaces
→ "Creative" outputs are interpolations, not extrapolations
```
### Why Does This Happen?
1. **Statistical Pattern Learning**: LLMs learn co-occurrence patterns from training data
2. **Mode Collapse**: When asked to be "creative," LLMs sample from the distribution of "creative ideas" they've seen
3. **Relevance Trap**: Strong associations dominate weak ones (chair→furniture >> chair→marine biology)
4. **Prototype Bias**: Outputs gravitate toward category prototypes, not edge cases
---
## The Solution: Expert Perspective Transformation
### Theoretical Basis
Our approach draws from three key theoretical foundations:
#### 1. Semantic Distance Theory (Mednick, 1962)
> "Creative thinking involves connecting weakly related, remote concepts in semantic memory."
**Key insight**: Creativity correlates with semantic distance. The farther the conceptual "jump," the more creative the result.
**Our application**: Expert perspectives force semantic jumps that LLMs wouldn't naturally make.
```
Without Expert:
"Chair" → furniture, sitting, comfort, design
Semantic distance: SHORT
With Marine Biologist Expert:
"Chair" → underwater pressure, coral structure, buoyancy, bioluminescence
Semantic distance: LONG
Result: Novel ideas like "pressure-adaptive seating" or "coral-inspired structural support"
```
#### 2. Conceptual Blending Theory (Fauconnier & Turner, 2002)
> "Creative products emerge from blending elements of two input spaces into a novel integrated space."
**The blending process**:
1. Input Space 1: The target concept (e.g., "chair")
2. Input Space 2: The expert's domain knowledge (e.g., marine biology)
3. Generic Space: Abstract structure shared by both
4. Blended Space: Novel integration of elements from both inputs
**Our application**: Each expert provides a distinct input space for systematic blending.
```
┌─────────────────┐ ┌─────────────────┐
│ Input 1 │ │ Input 2 │
│ "Chair" │ │ Marine Biology │
│ - support │ │ - pressure │
│ - sitting │ │ - buoyancy │
│ - comfort │ │ - adaptation │
└────────┬────────┘ └────────┬────────┘
│ │
└───────────┬───────────┘
┌─────────────────────┐
│ Blended Space │
│ Novel Chair Ideas │
│ - pressure-adapt │
│ - buoyant support │
│ - bio-adaptive │
└─────────────────────┘
```
#### 3. Design Fixation Breaking (Jansson & Smith, 1991)
> "Design fixation is blind adherence to initial ideas, limiting creative output."
**Fixation occurs because**:
- Knowledge is organized around category prototypes
- Prototypes require less cognitive effort to access
- Initial examples anchor subsequent ideation
**Our application**: Expert perspectives act as "defixation triggers" by activating non-prototype knowledge.
```
Without Intervention:
Prototype: "standard four-legged chair"
Fixation: Variations on four-legged design
With Expert Intervention:
Archaeologist: "Ancient people sat differently..."
Dance Therapist: "Seating affects movement expression..."
Fixation Broken: Entirely new seating paradigms explored
```
---
## The Multi-Expert Aggregation Model
### From "Wisdom of Crowds" to "Inner Crowd"
Research shows that groups generate more diverse ideas because each member brings different perspectives. Our system simulates this "crowd wisdom" through multiple expert personas:
```
Traditional Crowd:
Person 1 → Ideas from perspective 1
Person 2 → Ideas from perspective 2
Person 3 → Ideas from perspective 3
Aggregation → Diverse idea pool
Our "Inner Crowd":
LLM + Expert 1 Persona → Ideas from perspective 1
LLM + Expert 2 Persona → Ideas from perspective 2
LLM + Expert 3 Persona → Ideas from perspective 3
Aggregation → Diverse idea pool (simulated crowd)
```
### Why Multiple Experts Work
1. **Coverage**: Different experts activate different semantic regions
2. **Redundancy Reduction**: Deduplication removes overlapping ideas
3. **Diversity by Design**: Expert selection can be optimized for maximum diversity
4. **Diminishing Returns**: Beyond ~4-6 experts, marginal diversity gains decrease
---
## The Complete Pipeline
### Stage 1: Attribute Decomposition
**Purpose**: Structure the problem space before creative exploration
```
Input: "Innovative chair design"
Output:
Categories: [Material, Function, Usage, User Group]
Material: [wood, metal, fabric, composite]
Function: [support, comfort, mobility, storage]
Usage: [office, home, outdoor, medical]
User Group: [children, elderly, professionals, athletes]
```
**Theoretical basis**: Structured decomposition prevents premature fixation on holistic solutions.
### Stage 2: Expert Team Generation
**Purpose**: Assemble diverse perspectives for maximum semantic coverage
```
Strategies:
1. LLM-Generated: Query-specific, prioritizes unconventional experts
2. Curated: Pre-selected high-quality occupations
3. External Sources: DBpedia, Wikidata for broad coverage
Diversity Optimization:
- Domain spread (arts, science, trades, services)
- Expertise level variation
- Cultural/geographic diversity
```
### Stage 3: Expert Transformation
**Purpose**: Apply each expert's perspective to each attribute
```
For each (attribute, expert) pair:
Input: "Chair comfort" + "Marine Biologist"
LLM Prompt:
"As a marine biologist, how might you reimagine
chair comfort using principles from your field?"
Output: Keywords + Descriptions
- "Pressure-distributed seating inspired by deep-sea fish"
- "Buoyancy-assisted support reducing pressure points"
```
### Stage 4: Deduplication
**Purpose**: Ensure idea set is truly diverse, not just numerous
```
Methods:
1. Embedding-based: Fast cosine similarity clustering
2. LLM-based: Semantic pairwise comparison (more accurate)
Output:
- Unique ideas grouped by similarity
- Representative idea selected from each cluster
- Diversity metrics computed
```
### Stage 5: Novelty Validation
**Purpose**: Ground novelty in real-world uniqueness
```
Process:
- Search patent databases for similar concepts
- Compute overlap scores
- Flag ideas with high existing coverage
Output:
- Novelty score per idea
- Patent overlap rate for idea set
```
---
## Testable Hypotheses
### H1: Semantic Diversity
> Multi-expert generation produces higher semantic diversity than single-expert or direct generation.
**Measurement**: Mean pairwise cosine distance between idea embeddings
### H2: Novelty
> Ideas from multi-expert generation have lower patent overlap than direct generation.
**Measurement**: Percentage of ideas with existing patent matches
### H3: Expert Count Effect
> Semantic diversity increases with expert count, with diminishing returns beyond 4-6 experts.
**Measurement**: Diversity vs. expert count curve
### H4: Expert Source Effect
> LLM-generated experts produce more unconventional ideas than curated/database experts.
**Measurement**: Semantic distance from query centroid
### H5: Fixation Breaking
> Multi-expert approach produces more ideas outside the top-3 semantic clusters than direct generation.
**Measurement**: Cluster distribution analysis
---
## Expected Contributions
1. **Theoretical**: Formalization of "semantic gravity" as LLM creativity limitation
2. **Methodological**: Expert-augmented ideation pipeline with evaluation framework
3. **Empirical**: Quantitative evidence for multi-expert creativity enhancement
4. **Practical**: Open-source system for innovation ideation
---
## Positioning Against Related Work
| Approach | Limitation | Our Advantage |
|----------|------------|---------------|
| Direct LLM generation | Semantic gravity, fixation | Expert-forced semantic jumps |
| Human brainstorming | Cognitive fatigue, social dynamics | Tireless LLM generation |
| PersonaFlow (2024) | Research-focused, no attribute structure | Product innovation, structured decomposition |
| PopBlends (2023) | Two-concept blending only | Multi-expert, multi-attribute blending |
| BILLY (2025) | Vector fusion less interpretable | Sequential generation, explicit control |