- Add complete experiments directory with pilot study infrastructure
  - 5 experimental conditions (direct, expert-only, attribute-only, full-pipeline, random-perspective)
  - Human assessment tool with React frontend and FastAPI backend
  - AUT flexibility analysis with jump signal detection
  - Result visualization and metrics computation
- Add novelty-driven agent loop module (experiments/novelty_loop/)
  - NoveltyDrivenTaskAgent with expert perspective perturbation
  - Three termination strategies: breakthrough, exhaust, coverage
  - Interactive CLI demo with colored output
  - Embedding-based novelty scoring (see the sketch below)
- Add DDC knowledge domain classification data (en/zh)
- Add CLAUDE.md project documentation
- Update research report with experiment findings

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
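A minimal sketch of the embedding-based novelty idea, assuming novelty is measured as the cosine distance from a candidate task's embedding to its nearest neighbour among previously generated tasks. The function name, the random stand-in embeddings, and the "first task is maximally novel" convention are illustrative assumptions, not the signatures exported by the novelty_metrics module listed below.

import numpy as np

def novelty_score(candidate: np.ndarray, archive: np.ndarray) -> float:
    """Novelty = cosine distance to the nearest prior task embedding."""
    if archive.size == 0:
        return 1.0  # assumption: the first task is treated as maximally novel
    cand = candidate / np.linalg.norm(candidate)
    arch = archive / np.linalg.norm(archive, axis=1, keepdims=True)
    similarities = arch @ cand               # cosine similarity to every prior task
    return float(1.0 - similarities.max())   # distance to the closest one

# Hypothetical usage with random vectors standing in for text embeddings.
rng = np.random.default_rng(0)
prior = rng.normal(size=(10, 384))           # 10 prior task embeddings
print(novelty_score(rng.normal(size=384), prior))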
43 lines · 829 B · Python
"""
|
|
Novelty-Driven LLM Agent Loop
|
|
|
|
An autonomous agent that generates tasks using novelty as the termination condition.
|
|
"""
|
|
|
|
from .agent import (
|
|
NoveltyDrivenTaskAgent,
|
|
ExhaustFrontierAgent,
|
|
CoverageTargetAgent,
|
|
GeneratedTask,
|
|
TaskGenerationResult,
|
|
ExpertProvider,
|
|
DomainProvider,
|
|
)
|
|
|
|
from .novelty_metrics import (
|
|
NoveltyMetrics,
|
|
NoveltyScore,
|
|
NoveltyTrajectory,
|
|
compute_batch_novelty,
|
|
find_most_novel,
|
|
)
|
|
|
|
__all__ = [
|
|
# Agents
|
|
"NoveltyDrivenTaskAgent",
|
|
"ExhaustFrontierAgent",
|
|
"CoverageTargetAgent",
|
|
# Data classes
|
|
"GeneratedTask",
|
|
"TaskGenerationResult",
|
|
"NoveltyScore",
|
|
"NoveltyTrajectory",
|
|
# Providers
|
|
"ExpertProvider",
|
|
"DomainProvider",
|
|
# Metrics
|
|
"NoveltyMetrics",
|
|
"compute_batch_novelty",
|
|
"find_most_novel",
|
|
]
|
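The three agent classes re-exported above correspond to the termination strategies named in the commit message (breakthrough, exhaust, coverage). Their constructors and methods are not shown in this file, so the loop below is only a schematic of how the strategies differ; generate_task, score, and the specific thresholds are hypothetical stand-ins, not the module's actual API.

from typing import Callable, List

def run_loop(
    generate_task: Callable[[], str],          # hypothetical task generator
    score: Callable[[str, List[str]], float],  # hypothetical novelty scorer
    strategy: str = "breakthrough",
    max_steps: int = 50,
) -> List[str]:
    archive: List[str] = []
    stagnant_rounds = 0
    covered: set = set()
    for _ in range(max_steps):
        task = generate_task()
        novelty = score(task, archive)
        archive.append(task)
        if strategy == "breakthrough" and novelty >= 0.6:
            break                              # stop at the first highly novel task
        if strategy == "exhaust":
            stagnant_rounds = 0 if novelty >= 0.2 else stagnant_rounds + 1
            if stagnant_rounds >= 5:           # nothing novel for several rounds
                break
        if strategy == "coverage":
            covered.add(hash(task) % 10)       # stand-in for a domain/cluster label
            if len(covered) >= 8:              # stop once enough domains are hit
                break
    return archive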