chore: save local changes
This commit is contained in:
@@ -58,7 +58,8 @@ async def execute_step0(
|
||||
prompt = get_step0_category_analysis_prompt(
|
||||
request.query,
|
||||
request.suggested_category_count,
|
||||
exclude_categories=exclude_categories
|
||||
exclude_categories=exclude_categories,
|
||||
lang=request.lang
|
||||
)
|
||||
temperature = request.temperature if request.temperature is not None else 0.7
|
||||
response = await ollama_provider.generate(
|
||||
@@ -310,7 +311,7 @@ async def generate_sse_events(request: StreamAnalyzeRequest) -> AsyncGenerator[s
|
||||
# ========== Step 1: Generate Attributes (Dynamic) ==========
|
||||
yield f"event: step1_start\ndata: {json.dumps({'message': '生成屬性...'}, ensure_ascii=False)}\n\n"
|
||||
|
||||
step1_prompt = get_step1_dynamic_attributes_prompt(request.query, final_categories)
|
||||
step1_prompt = get_step1_dynamic_attributes_prompt(request.query, final_categories, lang=request.lang)
|
||||
logger.info(f"Step 1 prompt: {step1_prompt[:200]}")
|
||||
|
||||
step1_response = await ollama_provider.generate(
|
||||
@@ -330,6 +331,7 @@ async def generate_sse_events(request: StreamAnalyzeRequest) -> AsyncGenerator[s
|
||||
query=request.query,
|
||||
categories=final_categories,
|
||||
attributes_by_category=step1_result.attributes,
|
||||
lang=request.lang
|
||||
)
|
||||
logger.info(f"Step 2 (relationships) prompt: {step2_prompt[:300]}")
|
||||
|
||||
|
||||
@@ -63,7 +63,8 @@ async def deduplicate_descriptions(request: DeduplicationRequest) -> Deduplicati
|
||||
# 使用 LLM 成對比較去重
|
||||
result = await llm_deduplication_service.deduplicate(
|
||||
descriptions=request.descriptions,
|
||||
model=request.model
|
||||
model=request.model,
|
||||
lang=request.lang
|
||||
)
|
||||
return result
|
||||
except ValueError as e:
|
||||
|
||||
@@ -68,7 +68,8 @@ async def generate_expert_transformation_events(
|
||||
query=request.query,
|
||||
categories=all_categories,
|
||||
expert_count=request.expert_count,
|
||||
custom_experts=actual_custom_experts if actual_custom_experts else None
|
||||
custom_experts=actual_custom_experts if actual_custom_experts else None,
|
||||
lang=request.lang
|
||||
)
|
||||
logger.info(f"Expert prompt: {expert_prompt[:200]}")
|
||||
|
||||
@@ -119,7 +120,8 @@ async def generate_expert_transformation_events(
|
||||
query=request.query,
|
||||
categories=all_categories,
|
||||
expert_count=request.expert_count,
|
||||
custom_experts=actual_custom_experts if actual_custom_experts else None
|
||||
custom_experts=actual_custom_experts if actual_custom_experts else None,
|
||||
lang=request.lang
|
||||
)
|
||||
|
||||
expert_response = await ollama_provider.generate(
|
||||
@@ -160,7 +162,8 @@ async def generate_expert_transformation_events(
|
||||
category=request.category,
|
||||
attribute=attribute,
|
||||
experts=[e.model_dump() for e in experts],
|
||||
keywords_per_expert=request.keywords_per_expert
|
||||
keywords_per_expert=request.keywords_per_expert,
|
||||
lang=request.lang
|
||||
)
|
||||
logger.info(f"Keyword prompt for '{attribute}': {kw_prompt[:300]}")
|
||||
|
||||
@@ -214,7 +217,8 @@ async def generate_expert_transformation_events(
|
||||
keyword=kw.keyword,
|
||||
expert_id=kw.expert_id,
|
||||
expert_name=kw.expert_name,
|
||||
expert_domain=expert_domain
|
||||
expert_domain=expert_domain,
|
||||
lang=request.lang
|
||||
)
|
||||
|
||||
desc_response = await ollama_provider.generate(
|
||||
|
||||
133
backend/app/routers/patent_search.py
Normal file
133
backend/app/routers/patent_search.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""Patent Search Router - Search for similar patents"""
|
||||
|
||||
import logging
|
||||
from typing import Optional, List
|
||||
|
||||
from fastapi import APIRouter
|
||||
from pydantic import BaseModel
|
||||
|
||||
from ..services.patent_search_service import patent_search_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api/patent", tags=["patent"])
|
||||
|
||||
|
||||
# ===== Request/Response Models =====
|
||||
|
||||
class PatentSearchRequest(BaseModel):
    """Patent search request"""
    # Search query (description or keywords)
    query: str
    # Maximum results to return; the endpoint clamps this into the 1-20 range
    max_results: int = 10
|
||||
|
||||
|
||||
class PatentResult(BaseModel):
    """Single patent result"""
    # Patent office publication identifier
    publication_number: str
    title: str
    # Short text excerpt from the patent matching the query
    snippet: str
    publication_date: Optional[str] = None
    assignee: Optional[str] = None
    inventor: Optional[str] = None
    # One of: ACTIVE, NOT_ACTIVE, UNKNOWN
    status: str
    pdf_url: Optional[str] = None
    thumbnail_url: Optional[str] = None
|
||||
|
||||
|
||||
class PatentSearchResponse(BaseModel):
    """Patent search response"""
    # Echo of the query that was searched
    query: str
    total_results: int
    patents: List[PatentResult]
    # Error message from the search service, if any (None on success)
    error: Optional[str] = None
|
||||
|
||||
|
||||
class BatchPatentSearchRequest(BaseModel):
    """Batch patent search request - search multiple descriptions"""
    # List of descriptions to search
    queries: List[str]
    # Max results per query; the endpoint clamps this into the 1-10 range
    max_results_per_query: int = 5
|
||||
|
||||
|
||||
class BatchPatentSearchResult(BaseModel):
    """Results for a single query in batch search"""
    # The individual query this result set belongs to
    query: str
    total_results: int
    patents: List[PatentResult]
    # Error message from the search service for this query, if any
    error: Optional[str] = None
|
||||
|
||||
|
||||
class BatchPatentSearchResponse(BaseModel):
    """Batch patent search response"""
    # One result entry per input query, in the same order as the request
    results: List[BatchPatentSearchResult]
    total_queries: int
|
||||
|
||||
|
||||
# ===== Endpoints =====
|
||||
|
||||
@router.post("/search", response_model=PatentSearchResponse)
async def search_patents(request: PatentSearchRequest):
    """
    Search for patents similar to the given description/query.

    Uses Google Patents to find related patents based on keywords.
    """
    logger.info(f"Patent search request: {request.query[:100]}...")

    # Clamp the requested result count into the supported 1-20 window.
    capped_results = request.max_results
    if capped_results < 1:
        capped_results = 1
    elif capped_results > 20:
        capped_results = 20

    search_outcome = await patent_search_service.search(
        query=request.query,
        max_results=capped_results,
    )

    # Convert raw service dicts into typed response models.
    found_patents = [PatentResult(**entry) for entry in search_outcome.get("patents", [])]
    return PatentSearchResponse(
        query=request.query,
        total_results=search_outcome.get("total_results", 0),
        patents=found_patents,
        error=search_outcome.get("error"),
    )
|
||||
|
||||
|
||||
@router.post("/search/batch", response_model=BatchPatentSearchResponse)
async def batch_search_patents(request: BatchPatentSearchRequest):
    """
    Search for patents for multiple descriptions at once.

    Useful for checking multiple creative descriptions against patents.
    """
    logger.info(f"Batch patent search: {len(request.queries)} queries")

    # Clamp the per-query result count into the supported 1-10 window.
    per_query_cap = min(max(1, request.max_results_per_query), 10)

    batch_results: List[BatchPatentSearchResult] = []
    # Queries are searched one at a time; per-query errors are carried in
    # each result entry rather than failing the whole batch.
    for description in request.queries:
        raw = await patent_search_service.search(
            query=description,
            max_results=per_query_cap,
        )
        batch_results.append(
            BatchPatentSearchResult(
                query=description,
                total_results=raw.get("total_results", 0),
                patents=[PatentResult(**entry) for entry in raw.get("patents", [])],
                error=raw.get("error"),
            )
        )

    return BatchPatentSearchResponse(
        results=batch_results,
        total_queries=len(request.queries),
    )
|
||||
|
||||
|
||||
@router.get("/health")
async def patent_search_health():
    """Check if patent search service is working"""
    # Probe the backend with a minimal single-result query.
    probe = await patent_search_service.search("test", max_results=1)

    failure = probe.get("error")
    if failure:
        return {"status": "unhealthy", "error": failure}
    return {"status": "healthy"}
|
||||
@@ -36,7 +36,8 @@ async def generate_transformation_events(
|
||||
keyword_prompt = get_keyword_generation_prompt(
|
||||
category=request.category,
|
||||
attributes=request.attributes,
|
||||
keyword_count=request.keyword_count
|
||||
keyword_count=request.keyword_count,
|
||||
lang=request.lang
|
||||
)
|
||||
logger.info(f"Keyword prompt: {keyword_prompt[:200]}")
|
||||
|
||||
@@ -61,7 +62,8 @@ async def generate_transformation_events(
|
||||
desc_prompt = get_batch_description_prompt(
|
||||
query=request.query,
|
||||
category=request.category,
|
||||
keywords=new_keywords
|
||||
keywords=new_keywords,
|
||||
lang=request.lang
|
||||
)
|
||||
logger.info(f"Description prompt: {desc_prompt[:300]}")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user