Files
novelty-seeking/experiments/conditions/c1_direct.py
gbanyan 43c025e060 feat: Add experiments framework and novelty-driven agent loop
- Add complete experiments directory with pilot study infrastructure
  - 5 experimental conditions (direct, expert-only, attribute-only, full-pipeline, random-perspective)
  - Human assessment tool with React frontend and FastAPI backend
  - AUT flexibility analysis with jump signal detection
  - Result visualization and metrics computation

- Add novelty-driven agent loop module (experiments/novelty_loop/)
  - NoveltyDrivenTaskAgent with expert perspective perturbation
  - Three termination strategies: breakthrough, exhaust, coverage
  - Interactive CLI demo with colored output
  - Embedding-based novelty scoring

- Add DDC knowledge domain classification data (en/zh)
- Add CLAUDE.md project documentation
- Update research report with experiment findings

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-20 10:16:21 +08:00

112 lines
3.1 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""
Condition 1: Direct Generation (Baseline)
Single LLM call asking for creative ideas directly.
No attribute decomposition, no expert perspectives.
"""
import sys
from pathlib import Path
# Add backend to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "backend"))
from typing import List, Dict, Any
from app.services.llm_service import ollama_provider, extract_json_from_response
from experiments.config import MODEL, TEMPERATURE, IDEAS_DIRECT, PROMPT_LANGUAGE
def get_direct_generation_prompt(query: str, idea_count: int, lang: str = "en") -> str:
"""Generate prompt for direct idea generation."""
if lang == "en":
return f"""/no_think
Generate {idea_count} creative and innovative ideas for "{query}".
Requirements:
1. Each idea should be specific and actionable
2. Ideas should be diverse, covering different aspects and applications
3. Include both practical improvements and creative innovations
4. Ideas should be 15-30 words each
Return JSON only:
{{"ideas": ["idea 1", "idea 2", "idea 3", ...]}}
Generate exactly {idea_count} ideas."""
else:
return f"""/no_think
為「{query}」生成 {idea_count} 個創意點子。
要求:
1. 每個點子要具體可行
2. 點子要多元,涵蓋不同面向和應用
3. 包含實用改進和創意創新
4. 每個點子 15-30 字
只回傳 JSON
{{"ideas": ["點子1", "點子2", "點子3", ...]}}
生成正好 {idea_count} 個點子。"""
async def generate_ideas(
query: str,
model: str = None,
temperature: float = None,
idea_count: int = None,
lang: str = None
) -> Dict[str, Any]:
"""
Generate ideas using direct LLM generation (C1 baseline).
Args:
query: The object/concept to generate ideas for
model: LLM model to use (default from config)
temperature: Generation temperature (default from config)
idea_count: Number of ideas to generate (default from config)
lang: Language for prompts (default from config)
Returns:
Dict with ideas and metadata
"""
model = model or MODEL
temperature = temperature or TEMPERATURE
idea_count = idea_count or IDEAS_DIRECT
lang = lang or PROMPT_LANGUAGE
prompt = get_direct_generation_prompt(query, idea_count, lang)
response = await ollama_provider.generate(
prompt=prompt,
model=model,
temperature=temperature
)
result = extract_json_from_response(response)
ideas = result.get("ideas", [])
return {
"condition": "c1_direct",
"query": query,
"ideas": ideas,
"idea_count": len(ideas),
"metadata": {
"model": model,
"temperature": temperature,
"prompt_language": lang,
"mechanism": "direct_llm_generation"
}
}
# For testing
if __name__ == "__main__":
import asyncio
async def test():
result = await generate_ideas("Chair")
print(f"Generated {result['idea_count']} ideas:")
for i, idea in enumerate(result['ideas'], 1):
print(f" {i}. {idea}")
asyncio.run(test())