feat: Add experiments framework and novelty-driven agent loop

- Add complete experiments directory with pilot study infrastructure
  - 5 experimental conditions (direct, expert-only, attribute-only, full-pipeline, random-perspective)
  - Human assessment tool with React frontend and FastAPI backend
  - AUT flexibility analysis with jump-signal detection (see the sketch after this list)
  - Result visualization and metrics computation
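
A minimal sketch of the jump-signal idea, assuming a "jump" is a transition between consecutive AUT responses whose semantic similarity drops below a threshold; the 0.5 cutoff and the similarity source are illustrative assumptions, not taken from this commit:

# Hedged sketch: flag a "jump" wherever consecutive ideas are semantically far
# apart. similarities[i] is the assumed similarity between idea i and idea i+1.
from typing import List, Sequence

def jump_signals(similarities: Sequence[float], threshold: float = 0.5) -> List[int]:
    """Return the transition indices i (idea i -> idea i+1) that count as jumps."""
    return [i for i, sim in enumerate(similarities) if sim < threshold]

print(jump_signals([0.8, 0.3, 0.7, 0.2]))  # [1, 3]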

- Add novelty-driven agent loop module (experiments/novelty_loop/)
  - NoveltyDrivenTaskAgent with expert perspective perturbation
  - Three termination strategies: breakthrough, exhaust, coverage
  - Interactive CLI demo with colored output
  - Embedding-based novelty scoring (see the sketch after this list)
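
A minimal sketch of embedding-based novelty scoring, assuming novelty is the cosine distance to the nearest previously seen idea; the embedding model and the 0.8 breakthrough threshold are illustrative assumptions:

# Hedged sketch: novelty of a new idea = 1 - max cosine similarity to history.
import math
from typing import List

def cosine(a: List[float], b: List[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    na = math.sqrt(sum(x * x for x in a))
    nb = math.sqrt(sum(y * y for y in b))
    return dot / (na * nb) if na and nb else 0.0

def novelty(idea_vec: List[float], history: List[List[float]]) -> float:
    if not history:
        return 1.0  # first idea is maximally novel by convention
    return 1.0 - max(cosine(idea_vec, h) for h in history)

def is_breakthrough(score: float, threshold: float = 0.8) -> bool:
    # "breakthrough" termination: stop once a single idea clears a high bar.
    return score >= threshold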

- Add DDC knowledge domain classification data (en/zh)
- Add CLAUDE.md project documentation
- Update research report with experiment findings

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Date: 2026-01-20 10:16:21 +08:00
parent 26a56a2a07
commit 43c025e060
81 changed files with 18766 additions and 2 deletions


@@ -0,0 +1,23 @@
"""
Condition implementations for the 5-condition experiment.
C1: Direct generation (baseline)
C2: Expert-only (no attributes)
C3: Attribute-only (no experts)
C4: Full pipeline (attributes + experts)
C5: Random-perspective (random words instead of experts)
"""
from .c1_direct import generate_ideas as c1_generate
from .c2_expert_only import generate_ideas as c2_generate
from .c3_attribute_only import generate_ideas as c3_generate
from .c4_full_pipeline import generate_ideas as c4_generate
from .c5_random_perspective import generate_ideas as c5_generate
__all__ = [
"c1_generate",
"c2_generate",
"c3_generate",
"c4_generate",
"c5_generate",
]
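
A hypothetical harness comparing all five conditions on one query. The package import path is an assumption (this diff does not show where the package lives); each generator accepts the query positionally with every other parameter defaulted from experiments.config:

# Hypothetical comparison harness (import path assumed, not shown in the diff).
import asyncio

from experiments.conditions import (
    c1_generate, c2_generate, c3_generate, c4_generate, c5_generate,
)

async def run_all(query: str) -> None:
    for generate in (c1_generate, c2_generate, c3_generate, c4_generate, c5_generate):
        result = await generate(query)
        print(f"{result['condition']}: {result['idea_count']} ideas")

if __name__ == "__main__":
    asyncio.run(run_all("Chair"))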


@@ -0,0 +1,111 @@
"""
Condition 1: Direct Generation (Baseline)
Single LLM call asking for creative ideas directly.
No attribute decomposition, no expert perspectives.
"""
import sys
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from experiments.config import MODEL, TEMPERATURE, IDEAS_DIRECT, PROMPT_LANGUAGE
def get_direct_generation_prompt(query: str, idea_count: int, lang: str = "en") -> str:
"""Generate prompt for direct idea generation."""
if lang == "en":
return f"""/no_think
Generate {idea_count} creative and innovative ideas for "{query}".
Requirements:
1. Each idea should be specific and actionable
2. Ideas should be diverse, covering different aspects and applications
3. Include both practical improvements and creative innovations
4. Ideas should be 15-30 words each
Return JSON only:
{{"ideas": ["idea 1", "idea 2", "idea 3", ...]}}
Generate exactly {idea_count} ideas."""
else:
return f"""/no_think
為「{query}」生成 {idea_count} 個創意點子。
要求:
1. 每個點子要具體可行
2. 點子要多元,涵蓋不同面向和應用
3. 包含實用改進和創意創新
4. 每個點子 15-30 字
只回傳 JSON:
{{"ideas": ["點子1", "點子2", "點子3", ...]}}
生成正好 {idea_count} 個點子。"""
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
idea_count: int = None,
lang: str = None
) -> Dict[str, Any]:
"""
Generate ideas using direct LLM generation (C1 baseline).
Args:
query: The object/concept to generate ideas for
model: LLM model to use (default from config)
temperature: Generation temperature (default from config)
idea_count: Number of ideas to generate (default from config)
lang: Language for prompts (default from config)
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
    temperature = temperature if temperature is not None else TEMPERATURE  # honor an explicit 0.0
idea_count = idea_count or IDEAS_DIRECT
lang = lang or PROMPT_LANGUAGE
prompt = get_direct_generation_prompt(query, idea_count, lang)
response = await ollama_provider.generate(
prompt=prompt,
model=model,
temperature=temperature
)
result = extract_json_from_response(response)
ideas = result.get("ideas", [])
return {
"condition": "c1_direct",
"query": query,
"ideas": ideas,
"idea_count": len(ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"mechanism": "direct_llm_generation"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas:")
for i, idea in enumerate(result['ideas'], 1):
print(f" {i}. {idea}")
asyncio.run(test())
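
The real extract_json_from_response lives in app.services.llm_service and is not shown in this commit; a minimal stand-in, assuming it pulls the first JSON object out of a possibly chatty completion:

# Minimal stand-in for extract_json_from_response (assumption: the real helper
# is at least this tolerant of text surrounding the JSON payload).
import json
import re

def extract_json_from_response(text: str) -> dict:
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if not match:
        return {}
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return {}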


@@ -0,0 +1,176 @@
"""
Condition 2: Expert-Only (No Attributes)
Uses expert perspectives to generate ideas, but without
attribute decomposition. Each expert generates ideas directly
for the query from their professional perspective.
"""
import sys
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from app.services.expert_source_service import expert_source_service
from experiments.config import (
MODEL, TEMPERATURE, EXPERT_COUNT, EXPERT_SOURCE,
IDEAS_PER_EXPERT, PROMPT_LANGUAGE
)
def get_expert_idea_generation_prompt(
query: str,
expert_name: str,
expert_domain: str,
idea_count: int,
lang: str = "en"
) -> str:
"""Generate prompt for expert-based idea generation."""
if lang == "en":
domain_text = f" ({expert_domain} field)" if expert_domain else ""
return f"""/no_think
You are a {expert_name}{domain_text}.
Task: Generate {idea_count} creative and innovative ideas for "{query}" from your professional perspective.
Requirements:
1. Each idea should reflect your professional expertise and unique viewpoint
2. Think about how concepts from your field could improve or reimagine "{query}"
3. Ideas should be specific and actionable (15-30 words each)
4. Combine your professional knowledge with creative thinking
Return JSON only:
{{"ideas": ["idea 1", "idea 2", "idea 3", ...]}}
Generate exactly {idea_count} ideas from your perspective as a {expert_name}."""
else:
domain_text = f"{expert_domain}領域)" if expert_domain else ""
return f"""/no_think
你是一位{expert_name}{domain_text}
任務:從你的專業角度,為「{query}」生成 {idea_count} 個創意點子。
要求:
1. 每個點子要反映你的專業知識和獨特觀點
2. 思考你領域的概念如何改進或重新想像「{query}
3. 點子要具體可行(每個 15-30 字)
4. 結合專業知識和創意思維
只回傳 JSON
{{"ideas": ["點子1", "點子2", "點子3", ...]}}
從你作為{expert_name}的角度生成正好 {idea_count} 個點子。"""
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
expert_count: int = None,
expert_source: str = None,
ideas_per_expert: int = None,
lang: str = None
) -> Dict[str, Any]:
"""
Generate ideas using expert perspectives only (C2).
Args:
query: The object/concept to generate ideas for
model: LLM model to use
temperature: Generation temperature
expert_count: Number of experts to use
expert_source: Source of experts (curated, dbpedia, etc.)
ideas_per_expert: Ideas each expert generates
lang: Language for prompts
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
    temperature = temperature if temperature is not None else TEMPERATURE  # honor an explicit 0.0
expert_count = expert_count or EXPERT_COUNT
expert_source = expert_source or EXPERT_SOURCE
ideas_per_expert = ideas_per_expert or IDEAS_PER_EXPERT
lang = lang or PROMPT_LANGUAGE
# Get experts from curated source
experts, actual_source = expert_source_service.get_experts(
source=expert_source,
count=expert_count,
language=lang
)
all_ideas = []
expert_details = []
for expert in experts:
expert_name = expert.get("name", "Expert")
expert_domain = expert.get("domain", "")
prompt = get_expert_idea_generation_prompt(
query=query,
expert_name=expert_name,
expert_domain=expert_domain,
idea_count=ideas_per_expert,
lang=lang
)
response = await ollama_provider.generate(
prompt=prompt,
model=model,
temperature=temperature
)
result = extract_json_from_response(response)
ideas = result.get("ideas", [])
# Tag ideas with expert source
for idea in ideas:
all_ideas.append({
"idea": idea,
"expert_name": expert_name,
"expert_domain": expert_domain
})
expert_details.append({
"name": expert_name,
"domain": expert_domain,
"ideas_generated": len(ideas)
})
return {
"condition": "c2_expert_only",
"query": query,
"ideas": [item["idea"] for item in all_ideas],
"ideas_with_source": all_ideas,
"idea_count": len(all_ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"expert_count": expert_count,
"expert_source": actual_source,
"ideas_per_expert": ideas_per_expert,
"experts": expert_details,
"mechanism": "expert_perspectives_only"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas from {len(result['metadata']['experts'])} experts:")
for exp in result['metadata']['experts']:
print(f" - {exp['name']}: {exp['ideas_generated']} ideas")
print("\nSample ideas:")
for i, item in enumerate(result['ideas_with_source'][:5], 1):
print(f" {i}. [{item['expert_name']}] {item['idea']}")
asyncio.run(test())
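
The per-expert loop above awaits each Ollama call sequentially. A hedged sketch of a concurrent variant using asyncio.gather, reusing the helpers defined in this file; whether it actually helps depends on the Ollama server's request parallelism:

# Hypothetical concurrent variant of the per-expert loop (not in this commit).
import asyncio

async def ideas_for_expert(query, expert, idea_count, model, temperature, lang):
    prompt = get_expert_idea_generation_prompt(
        query=query,
        expert_name=expert.get("name", "Expert"),
        expert_domain=expert.get("domain", ""),
        idea_count=idea_count,
        lang=lang,
    )
    response = await ollama_provider.generate(prompt=prompt, model=model, temperature=temperature)
    return extract_json_from_response(response).get("ideas", [])

async def gather_expert_ideas(query, experts, idea_count, model, temperature, lang):
    # One in-flight request per expert instead of one awaited request at a time.
    return await asyncio.gather(
        *(ideas_for_expert(query, e, idea_count, model, temperature, lang) for e in experts)
    )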


@@ -0,0 +1,181 @@
"""
Condition 3: Attribute-Only (No Experts)
Uses attribute decomposition to break down the query into
structured categories, then generates ideas from each attribute.
No expert perspectives involved.
"""
import sys
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from app.prompts.attribute_prompt import get_step1_dynamic_attributes_prompt
from experiments.config import (
MODEL, TEMPERATURE, FIXED_CATEGORIES, PROMPT_LANGUAGE
)
def get_attribute_idea_generation_prompt(
query: str,
category: str,
attribute: str,
idea_count: int,
lang: str = "en"
) -> str:
"""Generate prompt for attribute-based idea generation."""
if lang == "en":
return f"""/no_think
Generate {idea_count} creative ideas for "{query}" focusing on the attribute "{attribute}" (Category: {category}).
Requirements:
1. Each idea should be directly inspired by the attribute "{attribute}"
2. Think about how this attribute could be improved, reimagined, or applied in new ways
3. Ideas should be specific and actionable (15-30 words each)
4. Be creative while maintaining relevance to the attribute
Return JSON only:
{{"ideas": ["idea 1", "idea 2", ...]}}
Generate exactly {idea_count} ideas based on the attribute "{attribute}"."""
else:
return f"""/no_think
為「{query}」生成 {idea_count} 個創意點子,聚焦於屬性「{attribute}」(類別:{category})。
要求:
1. 每個點子要直接受屬性「{attribute}」啟發
2. 思考如何改進、重新想像或以新方式應用這個屬性
3. 點子要具體可行(每個 15-30 字)
4. 保持創意同時與屬性相關
只回傳 JSON:
{{"ideas": ["點子1", "點子2", ...]}}
基於屬性「{attribute}」生成正好 {idea_count} 個點子。"""
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
categories: List[str] = None,
ideas_per_attribute: int = 1,
lang: str = None
) -> Dict[str, Any]:
"""
Generate ideas using attribute decomposition only (C3).
Args:
query: The object/concept to generate ideas for
model: LLM model to use
temperature: Generation temperature
categories: Categories to use for decomposition
ideas_per_attribute: Ideas to generate per attribute
lang: Language for prompts
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
    temperature = temperature if temperature is not None else TEMPERATURE  # honor an explicit 0.0
categories = categories or FIXED_CATEGORIES
lang = lang or PROMPT_LANGUAGE
# Step 1: Generate attributes using existing prompt
# Build category definitions for the prompt
category_defs = [
{"name": cat, "description": f"Related {cat.lower()} of the object", "order": i}
for i, cat in enumerate(categories)
]
attr_prompt = get_step1_dynamic_attributes_prompt(
query=query,
categories=category_defs,
lang=lang
)
attr_response = await ollama_provider.generate(
prompt=attr_prompt,
model=model,
temperature=temperature
)
attributes_by_category = extract_json_from_response(attr_response)
# Step 2: Generate ideas for each attribute
all_ideas = []
attribute_details = []
for category in categories:
attrs = attributes_by_category.get(category, [])
for attr in attrs:
prompt = get_attribute_idea_generation_prompt(
query=query,
category=category,
attribute=attr,
idea_count=ideas_per_attribute,
lang=lang
)
response = await ollama_provider.generate(
prompt=prompt,
model=model,
temperature=temperature
)
result = extract_json_from_response(response)
ideas = result.get("ideas", [])
# Tag ideas with attribute source
for idea in ideas:
all_ideas.append({
"idea": idea,
"category": category,
"attribute": attr
})
attribute_details.append({
"category": category,
"attribute": attr,
"ideas_generated": len(ideas)
})
return {
"condition": "c3_attribute_only",
"query": query,
"ideas": [item["idea"] for item in all_ideas],
"ideas_with_source": all_ideas,
"idea_count": len(all_ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"categories": categories,
"attributes_by_category": attributes_by_category,
"attribute_count": sum(len(v) for v in attributes_by_category.values()),
"ideas_per_attribute": ideas_per_attribute,
"attributes": attribute_details,
"mechanism": "attribute_decomposition_only"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas from {result['metadata']['attribute_count']} attributes:")
for cat, attrs in result['metadata']['attributes_by_category'].items():
print(f" {cat}: {', '.join(attrs)}")
print("\nSample ideas:")
for i, item in enumerate(result['ideas_with_source'][:5], 1):
print(f" {i}. [{item['category']}/{item['attribute']}] {item['idea']}")
asyncio.run(test())
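
The Step-2 loop above assumes extract_json_from_response returns a mapping from category name to a list of attribute strings. An illustrative shape (category names and attributes here are made-up examples, not read from FIXED_CATEGORIES):

# Illustrative shape of attributes_by_category consumed by the Step-2 loop.
attributes_by_category = {
    "Material": ["solid wood", "steel mesh"],
    "Function": ["seating", "storage"],
    "Context": ["office", "outdoor"],
}
# Each (category, attribute) pair yields one idea-generation call, so idea
# volume is attribute_count * ideas_per_attribute.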


@@ -0,0 +1,214 @@
"""
Condition 4: Full Pipeline (Attributes + Experts)
The complete novelty-seeking system:
1. Attribute decomposition into categories
2. Expert team generation
3. Expert keyword generation for each attribute
4. Description generation for each keyword
"""
import sys
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from app.services.expert_source_service import expert_source_service
from app.prompts.attribute_prompt import get_step1_dynamic_attributes_prompt
from app.prompts.expert_transformation_prompt import (
get_expert_keyword_generation_prompt,
get_single_description_prompt
)
from experiments.config import (
MODEL, TEMPERATURE, FIXED_CATEGORIES, EXPERT_COUNT,
EXPERT_SOURCE, KEYWORDS_PER_EXPERT, PROMPT_LANGUAGE
)
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
categories: List[str] = None,
expert_count: int = None,
expert_source: str = None,
keywords_per_expert: int = None,
lang: str = None
) -> Dict[str, Any]:
"""
Generate ideas using the full pipeline (C4).
Args:
query: The object/concept to generate ideas for
model: LLM model to use
temperature: Generation temperature
categories: Categories for attribute decomposition
expert_count: Number of experts
expert_source: Source of experts
keywords_per_expert: Keywords each expert generates per attribute
lang: Language for prompts
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
    temperature = temperature if temperature is not None else TEMPERATURE  # honor an explicit 0.0
categories = categories or FIXED_CATEGORIES
expert_count = expert_count or EXPERT_COUNT
expert_source = expert_source or EXPERT_SOURCE
keywords_per_expert = keywords_per_expert or KEYWORDS_PER_EXPERT
lang = lang or PROMPT_LANGUAGE
# Step 0: Get experts from curated source
experts_data, actual_source = expert_source_service.get_experts(
source=expert_source,
count=expert_count,
language=lang
)
# Convert to expected format
experts = [
{
"id": f"expert-{i}",
"name": exp.get("name", "Expert"),
"domain": exp.get("domain", ""),
"perspective": exp.get("perspective", "")
}
for i, exp in enumerate(experts_data)
]
# Step 1: Generate attributes
category_defs = [
{"name": cat, "description": f"Related {cat.lower()} of the object", "order": i}
for i, cat in enumerate(categories)
]
attr_prompt = get_step1_dynamic_attributes_prompt(
query=query,
categories=category_defs,
lang=lang
)
attr_response = await ollama_provider.generate(
prompt=attr_prompt,
model=model,
temperature=temperature
)
attributes_by_category = extract_json_from_response(attr_response)
# Step 2: Expert keyword generation for each category/attribute
all_keywords = []
for category in categories:
attrs = attributes_by_category.get(category, [])
for attr in attrs:
# Generate keywords from all experts for this attribute
keyword_prompt = get_expert_keyword_generation_prompt(
category=category,
attribute=attr,
experts=experts,
keywords_per_expert=keywords_per_expert,
lang=lang
)
keyword_response = await ollama_provider.generate(
prompt=keyword_prompt,
model=model,
temperature=temperature
)
keyword_result = extract_json_from_response(keyword_response)
keywords = keyword_result.get("keywords", [])
for kw in keywords:
all_keywords.append({
"category": category,
"attribute": attr,
"keyword": kw.get("keyword", ""),
"expert_id": kw.get("expert_id", ""),
"expert_name": kw.get("expert_name", "")
})
# Step 3: Generate descriptions for each keyword
all_ideas = []
for kw_info in all_keywords:
# Find expert details
expert = next(
(e for e in experts if e["id"] == kw_info["expert_id"]),
{"name": kw_info["expert_name"], "domain": "", "id": kw_info["expert_id"]}
)
desc_prompt = get_single_description_prompt(
query=query,
keyword=kw_info["keyword"],
expert_id=expert["id"],
expert_name=expert["name"],
expert_domain=expert.get("domain", ""),
lang=lang
)
desc_response = await ollama_provider.generate(
prompt=desc_prompt,
model=model,
temperature=temperature
)
desc_result = extract_json_from_response(desc_response)
description = desc_result.get("description", "")
all_ideas.append({
"idea": description,
"keyword": kw_info["keyword"],
"category": kw_info["category"],
"attribute": kw_info["attribute"],
"expert_name": expert["name"],
"expert_domain": expert.get("domain", "")
})
return {
"condition": "c4_full_pipeline",
"query": query,
"ideas": [item["idea"] for item in all_ideas],
"ideas_with_source": all_ideas,
"idea_count": len(all_ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"categories": categories,
"attributes_by_category": attributes_by_category,
"attribute_count": sum(len(v) for v in attributes_by_category.values()),
"expert_count": expert_count,
"expert_source": actual_source,
"keywords_per_expert": keywords_per_expert,
"total_keywords": len(all_keywords),
"experts": [{"name": e["name"], "domain": e["domain"]} for e in experts],
"mechanism": "full_pipeline_attributes_plus_experts"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas using full pipeline:")
print(f" Attributes: {result['metadata']['attribute_count']}")
print(f" Experts: {result['metadata']['expert_count']}")
print(f" Keywords: {result['metadata']['total_keywords']}")
print("\nExperts used:")
for exp in result['metadata']['experts']:
print(f" - {exp['name']} ({exp['domain']})")
print("\nSample ideas:")
for i, item in enumerate(result['ideas_with_source'][:5], 1):
print(f" {i}. [{item['expert_name']}] {item['keyword']}: {item['idea']}")
asyncio.run(test())
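
Call volume for C4 grows multiplicatively: one call for attribute decomposition, one keyword call per attribute, and one description call per keyword. A back-of-envelope sketch with illustrative numbers (the real values come from experiments.config and the Step-1 output):

# Back-of-envelope LLM call count for the C4 pipeline (illustrative values).
categories = 4           # e.g. len(FIXED_CATEGORIES)
attrs_per_category = 3   # depends on the Step-1 LLM output
experts = 5              # EXPERT_COUNT
keywords_per_expert = 2  # KEYWORDS_PER_EXPERT

attributes = categories * attrs_per_category            # 12 keyword calls (Step 2)
keywords = attributes * experts * keywords_per_expert   # 120 description calls (Step 3)
total_llm_calls = 1 + attributes + keywords             # +1 for Step 1
print(total_llm_calls)  # 133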


@@ -0,0 +1,178 @@
"""
Condition 5: Random-Perspective Control
Uses random words as "perspectives" instead of domain experts.
Tests whether the benefit from expert perspectives comes from
domain knowledge or simply from any perspective shift.
"""
import sys
import hashlib
import json
import random
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from experiments.config import (
MODEL, TEMPERATURE, EXPERT_COUNT, IDEAS_PER_EXPERT,
PROMPT_LANGUAGE, RANDOM_SEED, DATA_DIR
)
def load_random_words() -> List[str]:
"""Load the random word pool from data file."""
words_file = DATA_DIR / "random_words.json"
with open(words_file, "r", encoding="utf-8") as f:
data = json.load(f)
return data.get("words", [])
def get_random_perspective_prompt(
query: str,
perspective_word: str,
idea_count: int,
lang: str = "en"
) -> str:
"""Generate prompt for random-perspective idea generation."""
if lang == "en":
return f"""/no_think
Generate {idea_count} creative and innovative ideas for "{query}" inspired by the concept of "{perspective_word}".
Requirements:
1. Each idea should draw inspiration from "{perspective_word}" - its qualities, characteristics, or associations
2. Think about how concepts related to "{perspective_word}" could improve or reimagine "{query}"
3. Ideas should be specific and actionable (15-30 words each)
4. Be creative in connecting "{perspective_word}" to "{query}"
Return JSON only:
{{"ideas": ["idea 1", "idea 2", "idea 3", ...]}}
Generate exactly {idea_count} ideas inspired by "{perspective_word}"."""
else:
return f"""/no_think
為「{query}」生成 {idea_count} 個創意點子,靈感來自「{perspective_word}」這個概念。
要求:
1. 每個點子要從「{perspective_word}」獲得靈感——它的特質、特徵或聯想
2. 思考與「{perspective_word}」相關的概念如何改進或重新想像「{query}」
3. 點子要具體可行(每個 15-30 字)
4. 創意地連接「{perspective_word}」和「{query}」
只回傳 JSON:
{{"ideas": ["點子1", "點子2", "點子3", ...]}}
生成正好 {idea_count} 個受「{perspective_word}」啟發的點子。"""
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
word_count: int = None,
ideas_per_word: int = None,
lang: str = None,
seed: int = None
) -> Dict[str, Any]:
"""
Generate ideas using random word perspectives (C5 control).
Args:
query: The object/concept to generate ideas for
model: LLM model to use
temperature: Generation temperature
word_count: Number of random words to use (matches expert count)
ideas_per_word: Ideas to generate per word
lang: Language for prompts
seed: Random seed for reproducibility
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
    temperature = temperature if temperature is not None else TEMPERATURE  # honor an explicit 0.0
word_count = word_count or EXPERT_COUNT
ideas_per_word = ideas_per_word or IDEAS_PER_EXPERT
lang = lang or PROMPT_LANGUAGE
seed = seed or RANDOM_SEED
# Load word pool and sample random words
word_pool = load_random_words()
    # Seeded randomness for reproducibility: derive a per-query seed from a
    # stable hash so different queries get different words, but the same query
    # gets the same words across runs. (The built-in hash() is salted per
    # process, so it must not be used here.)
    query_seed = seed + int(hashlib.sha256(query.encode("utf-8")).hexdigest(), 16) % 10000
    rng = random.Random(query_seed)
selected_words = rng.sample(word_pool, min(word_count, len(word_pool)))
all_ideas = []
word_details = []
for word in selected_words:
prompt = get_random_perspective_prompt(
query=query,
perspective_word=word,
idea_count=ideas_per_word,
lang=lang
)
response = await ollama_provider.generate(
prompt=prompt,
model=model,
temperature=temperature
)
result = extract_json_from_response(response)
ideas = result.get("ideas", [])
# Tag ideas with perspective word source
for idea in ideas:
all_ideas.append({
"idea": idea,
"perspective_word": word
})
word_details.append({
"word": word,
"ideas_generated": len(ideas)
})
return {
"condition": "c5_random_perspective",
"query": query,
"ideas": [item["idea"] for item in all_ideas],
"ideas_with_source": all_ideas,
"idea_count": len(all_ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"word_count": word_count,
"ideas_per_word": ideas_per_word,
"random_seed": seed,
"query_seed": query_seed,
"selected_words": selected_words,
"word_details": word_details,
"word_pool_size": len(word_pool),
"mechanism": "random_perspective_control"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas from {len(result['metadata']['selected_words'])} random words:")
print(f" Words used: {', '.join(result['metadata']['selected_words'])}")
print(f" Seed: {result['metadata']['random_seed']}, Query seed: {result['metadata']['query_seed']}")
print("\nSample ideas:")
for i, item in enumerate(result['ideas_with_source'][:5], 1):
print(f" {i}. [{item['perspective_word']}] {item['idea']}")
asyncio.run(test())
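
With the stable per-query seed used above, word selection is reproducible across processes. A standalone self-check that mirrors the seeding logic (the word pool here is a made-up stand-in for random_words.json):

# Standalone determinism check mirroring the seeding scheme above.
import hashlib
import random

def stable_query_seed(seed: int, query: str) -> int:
    digest = hashlib.sha256(query.encode("utf-8")).hexdigest()
    return seed + int(digest, 16) % 10000

pool = [f"word-{i}" for i in range(100)]
a = random.Random(stable_query_seed(42, "Chair")).sample(pool, 5)
b = random.Random(stable_query_seed(42, "Chair")).sample(pool, 5)
assert a == b  # same seed + same query -> same words, in any process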