feat: Add experiments framework and novelty-driven agent loop
- Add complete experiments directory with pilot study infrastructure
  - 5 experimental conditions (direct, expert-only, attribute-only, full-pipeline, random-perspective)
  - Human assessment tool with React frontend and FastAPI backend
  - AUT flexibility analysis with jump signal detection
  - Result visualization and metrics computation
- Add novelty-driven agent loop module (experiments/novelty_loop/)
  - NoveltyDrivenTaskAgent with expert perspective perturbation
  - Three termination strategies: breakthrough, exhaust, coverage
  - Interactive CLI demo with colored output
  - Embedding-based novelty scoring
- Add DDC knowledge domain classification data (en/zh)
- Add CLAUDE.md project documentation
- Update research report with experiment findings

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
176
experiments/conditions/c2_expert_only.py
Normal file
176
experiments/conditions/c2_expert_only.py
Normal file
@@ -0,0 +1,176 @@
|
||||
"""
|
||||
Condition 2: Expert-Only (No Attributes)
|
||||
|
||||
Uses expert perspectives to generate ideas, but without
|
||||
attribute decomposition. Each expert generates ideas directly
|
||||
for the query from their professional perspective.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add backend to path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
|
||||
|
||||
from typing import List, Dict, Any
|
||||
from app.services.llm_service import ollama_provider, extract_json_from_response
|
||||
from app.services.expert_source_service import expert_source_service
|
||||
from experiments.config import (
|
||||
MODEL, TEMPERATURE, EXPERT_COUNT, EXPERT_SOURCE,
|
||||
IDEAS_PER_EXPERT, PROMPT_LANGUAGE
|
||||
)
|
||||
|
||||
|
||||
def get_expert_idea_generation_prompt(
    query: str,
    expert_name: str,
    expert_domain: str,
    idea_count: int,
    lang: str = "en"
) -> str:
    """Build the idea-generation prompt for a single expert persona.

    Args:
        query: Object/concept the expert should ideate on.
        expert_name: Persona name injected into the prompt.
        expert_domain: Professional field; an empty string omits the
            domain suffix entirely.
        idea_count: Number of ideas the prompt asks the model to produce.
        lang: "en" selects the English template; any other value selects
            the Chinese template.

    Returns:
        The fully interpolated prompt string (includes the "/no_think"
        directive on its first line).
    """
    if lang == "en":
        suffix = f" ({expert_domain} field)" if expert_domain else ""
        return f"""/no_think
You are a {expert_name}{suffix}.

Task: Generate {idea_count} creative and innovative ideas for "{query}" from your professional perspective.

Requirements:
1. Each idea should reflect your professional expertise and unique viewpoint
2. Think about how concepts from your field could improve or reimagine "{query}"
3. Ideas should be specific and actionable (15-30 words each)
4. Combine your professional knowledge with creative thinking

Return JSON only:
{{"ideas": ["idea 1", "idea 2", "idea 3", ...]}}

Generate exactly {idea_count} ideas from your perspective as a {expert_name}."""

    # Chinese template (any non-"en" language code falls through here).
    suffix = f"({expert_domain}領域)" if expert_domain else ""
    return f"""/no_think
你是一位{expert_name}{suffix}。

任務:從你的專業角度,為「{query}」生成 {idea_count} 個創意點子。

要求:
1. 每個點子要反映你的專業知識和獨特觀點
2. 思考你領域的概念如何改進或重新想像「{query}」
3. 點子要具體可行(每個 15-30 字)
4. 結合專業知識和創意思維

只回傳 JSON:
{{"ideas": ["點子1", "點子2", "點子3", ...]}}

從你作為{expert_name}的角度生成正好 {idea_count} 個點子。"""
|
||||
|
||||
|
||||
async def generate_ideas(
    query: str,
    model: str = None,
    temperature: float = None,
    expert_count: int = None,
    expert_source: str = None,
    ideas_per_expert: int = None,
    lang: str = None
) -> Dict[str, Any]:
    """
    Generate ideas using expert perspectives only (C2).

    Each expert generates ideas directly for the query from their
    professional viewpoint; no attribute decomposition is involved.

    Args:
        query: The object/concept to generate ideas for
        model: LLM model to use (None -> config MODEL)
        temperature: Generation temperature (None -> config TEMPERATURE)
        expert_count: Number of experts to use (None -> config EXPERT_COUNT)
        expert_source: Source of experts (curated, dbpedia, etc.)
        ideas_per_expert: Ideas each expert generates
        lang: Language for prompts (None -> config PROMPT_LANGUAGE)

    Returns:
        Dict with ideas and metadata
    """
    # Resolve config defaults with explicit None checks. The previous
    # `x or DEFAULT` pattern silently discarded valid falsy arguments —
    # most notably temperature=0.0 (deterministic sampling) and lang="".
    model = MODEL if model is None else model
    temperature = TEMPERATURE if temperature is None else temperature
    expert_count = EXPERT_COUNT if expert_count is None else expert_count
    expert_source = EXPERT_SOURCE if expert_source is None else expert_source
    ideas_per_expert = IDEAS_PER_EXPERT if ideas_per_expert is None else ideas_per_expert
    lang = PROMPT_LANGUAGE if lang is None else lang

    # Fetch expert personas; actual_source may differ from the requested
    # source — record what the service actually used in the metadata.
    experts, actual_source = expert_source_service.get_experts(
        source=expert_source,
        count=expert_count,
        language=lang
    )

    all_ideas = []
    expert_details = []

    for expert in experts:
        expert_name = expert.get("name", "Expert")
        expert_domain = expert.get("domain", "")

        prompt = get_expert_idea_generation_prompt(
            query=query,
            expert_name=expert_name,
            expert_domain=expert_domain,
            idea_count=ideas_per_expert,
            lang=lang
        )

        response = await ollama_provider.generate(
            prompt=prompt,
            model=model,
            temperature=temperature
        )

        result = extract_json_from_response(response)
        ideas = result.get("ideas", [])

        # Tag each idea with the expert that produced it so downstream
        # analysis can attribute ideas to perspectives.
        for idea in ideas:
            all_ideas.append({
                "idea": idea,
                "expert_name": expert_name,
                "expert_domain": expert_domain
            })

        expert_details.append({
            "name": expert_name,
            "domain": expert_domain,
            "ideas_generated": len(ideas)
        })

    return {
        "condition": "c2_expert_only",
        "query": query,
        "ideas": [item["idea"] for item in all_ideas],
        "ideas_with_source": all_ideas,
        "idea_count": len(all_ideas),
        "metadata": {
            "model": model,
            "temperature": temperature,
            "prompt_language": lang,
            "expert_count": expert_count,
            "expert_source": actual_source,
            "ideas_per_expert": ideas_per_expert,
            "experts": expert_details,
            "mechanism": "expert_perspectives_only"
        }
    }
|
||||
|
||||
|
||||
# For testing
|
||||
if __name__ == "__main__":
    import asyncio

    async def _smoke_test():
        """Run the C2 condition once against a sample query and print a summary."""
        outcome = await generate_ideas("Chair")
        experts = outcome['metadata']['experts']
        print(f"Generated {outcome['idea_count']} ideas from {len(experts)} experts:")
        for entry in experts:
            print(f"  - {entry['name']}: {entry['ideas_generated']} ideas")
        print("\nSample ideas:")
        for rank, tagged in enumerate(outcome['ideas_with_source'][:5], 1):
            print(f"  {rank}. [{tagged['expert_name']}] {tagged['idea']}")

    asyncio.run(_smoke_test())
|
||||
Reference in New Issue
Block a user