- Add complete experiments directory with pilot study infrastructure
  - 5 experimental conditions (direct, expert-only, attribute-only, full-pipeline, random-perspective)
  - Human assessment tool with React frontend and FastAPI backend
  - AUT flexibility analysis with jump signal detection
  - Result visualization and metrics computation
- Add novelty-driven agent loop module (experiments/novelty_loop/)
  - NoveltyDrivenTaskAgent with expert perspective perturbation
  - Three termination strategies: breakthrough, exhaust, coverage
  - Interactive CLI demo with colored output
  - Embedding-based novelty scoring
- Add DDC knowledge domain classification data (en/zh)
- Add CLAUDE.md project documentation
- Update research report with experiment findings

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
182 lines · 5.7 KiB · Python
"""
|
||
Condition 3: Attribute-Only (No Experts)
|
||
|
||
Uses attribute decomposition to break down the query into
|
||
structured categories, then generates ideas from each attribute.
|
||
No expert perspectives involved.
|
||
"""
|
||
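
# Rough flow of this condition (values below are illustrative, not real model output):
#   Step 1: a single LLM call decomposes the query into attributes per category,
#           e.g. {"Function": ["seating", "back support"], ...}
#   Step 2: one LLM call per attribute returns {"ideas": ["...", ...]}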

import sys
from pathlib import Path

# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))

from typing import List, Dict, Any, Optional
from app.services.llm_service import ollama_provider, extract_json_from_response
from app.prompts.attribute_prompt import get_step1_dynamic_attributes_prompt
from experiments.config import (
    MODEL, TEMPERATURE, FIXED_CATEGORIES, PROMPT_LANGUAGE
)


def get_attribute_idea_generation_prompt(
    query: str,
    category: str,
    attribute: str,
    idea_count: int,
    lang: str = "en"
) -> str:
    """Generate prompt for attribute-based idea generation."""
    if lang == "en":
        return f"""/no_think
Generate {idea_count} creative ideas for "{query}" focusing on the attribute "{attribute}" (Category: {category}).

Requirements:
1. Each idea should be directly inspired by the attribute "{attribute}"
2. Think about how this attribute could be improved, reimagined, or applied in new ways
3. Ideas should be specific and actionable (15-30 words each)
4. Be creative while maintaining relevance to the attribute

Return JSON only:
{{"ideas": ["idea 1", "idea 2", ...]}}

Generate exactly {idea_count} ideas based on the attribute "{attribute}"."""
    else:
        return f"""/no_think
為「{query}」生成 {idea_count} 個創意點子,聚焦於屬性「{attribute}」(類別:{category})。

要求:
1. 每個點子要直接受屬性「{attribute}」啟發
2. 思考如何改進、重新想像或以新方式應用這個屬性
3. 點子要具體可行(每個 15-30 字)
4. 保持創意同時與屬性相關

只回傳 JSON:
{{"ideas": ["點子1", "點子2", ...]}}

基於屬性「{attribute}」生成正好 {idea_count} 個點子。"""


async def generate_ideas(
    query: str,
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    categories: Optional[List[str]] = None,
    ideas_per_attribute: int = 1,
    lang: Optional[str] = None
) -> Dict[str, Any]:
    """
    Generate ideas using attribute decomposition only (C3).

    Args:
        query: The object/concept to generate ideas for
        model: LLM model to use
        temperature: Generation temperature
        categories: Categories to use for decomposition
        ideas_per_attribute: Ideas to generate per attribute
        lang: Language for prompts

    Returns:
        Dict with ideas and metadata
    """
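    # Illustrative call (hypothetical values, not taken from the repo):
    #   result = await generate_ideas("Chair", ideas_per_attribute=2, lang="en")
    # Arguments left as None fall back to the experiments.config defaults below.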
    model = model or MODEL
    temperature = temperature or TEMPERATURE
    categories = categories or FIXED_CATEGORIES
    lang = lang or PROMPT_LANGUAGE

    # Step 1: Generate attributes using existing prompt
    # Build category definitions for the prompt
    category_defs = [
        {"name": cat, "description": f"Related {cat.lower()} of the object", "order": i}
        for i, cat in enumerate(categories)
    ]
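    # With hypothetical categories ["Function", "Material"], this yields entries like:
    #   {"name": "Function", "description": "Related function of the object", "order": 0}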

    attr_prompt = get_step1_dynamic_attributes_prompt(
        query=query,
        categories=category_defs,
        lang=lang
    )

    attr_response = await ollama_provider.generate(
        prompt=attr_prompt,
        model=model,
        temperature=temperature
    )

    attributes_by_category = extract_json_from_response(attr_response)
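    # Assumed shape, inferred from how it is consumed below:
    #   {"<category>": ["<attribute>", ...], ...}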

    # Step 2: Generate ideas for each attribute
    all_ideas = []
    attribute_details = []

    for category in categories:
        attrs = attributes_by_category.get(category, [])

        for attr in attrs:
            prompt = get_attribute_idea_generation_prompt(
                query=query,
                category=category,
                attribute=attr,
                idea_count=ideas_per_attribute,
                lang=lang
            )

            response = await ollama_provider.generate(
                prompt=prompt,
                model=model,
                temperature=temperature
            )

            result = extract_json_from_response(response)
            ideas = result.get("ideas", [])

            # Tag ideas with attribute source
            for idea in ideas:
                all_ideas.append({
                    "idea": idea,
                    "category": category,
                    "attribute": attr
                })

            attribute_details.append({
                "category": category,
                "attribute": attr,
                "ideas_generated": len(ideas)
            })

    return {
        "condition": "c3_attribute_only",
        "query": query,
        "ideas": [item["idea"] for item in all_ideas],
        "ideas_with_source": all_ideas,
        "idea_count": len(all_ideas),
        "metadata": {
            "model": model,
            "temperature": temperature,
            "prompt_language": lang,
            "categories": categories,
            "attributes_by_category": attributes_by_category,
            "attribute_count": sum(len(v) for v in attributes_by_category.values()),
            "ideas_per_attribute": ideas_per_attribute,
            "attributes": attribute_details,
            "mechanism": "attribute_decomposition_only"
        }
    }


# For testing
if __name__ == "__main__":
    import asyncio

    async def test():
        result = await generate_ideas("Chair")
        print(f"Generated {result['idea_count']} ideas from {result['metadata']['attribute_count']} attributes:")
        for cat, attrs in result['metadata']['attributes_by_category'].items():
            print(f"  {cat}: {', '.join(attrs)}")
        print("\nSample ideas:")
        for i, item in enumerate(result['ideas_with_source'][:5], 1):
            print(f"  {i}. [{item['category']}/{item['attribute']}] {item['idea']}")

    asyncio.run(test())