- Add tiered context system (summary/detailed/full) to reduce token usage by 70-80%
- Replace old _build_products_context with build_products_context_summary_list (Tier 1: ~15 tokens/product vs 150)
- Optimize function tool responses: exclude INCI list by default (saves ~15KB/product)
- Reduce actives from 24 to top 5 in function tools
- Add reasoning_chain field to AICallLog model for observability
- Implement _extract_thinking_content to capture LLM reasoning (MEDIUM thinking level)
- Strengthen prompt enforcement for prohibited fields (dose, amount, quantity)
- Update get_creative_config to use MEDIUM thinking level instead of LOW

Token savings:
- Routine suggestions: 9,613 → ~1,300 tokens (-86%)
- Batch planning: 12,580 → ~1,800 tokens (-86%)
- Function tool responses: ~15KB → ~2KB per product (-87%)

Breaks discovered in log analysis (ai_call_log.json):
- Lines 10, 27, 61, 78: LLM returned prohibited dose field
- Line 85: MAX_TOKENS failure (output truncated)

Phase 2 complete. Next: two-phase batch planning with safety verification.
50 lines · 1.7 KiB · Python
from datetime import datetime
from typing import Any, ClassVar
from uuid import UUID, uuid4

from sqlalchemy import JSON, Column
from sqlmodel import Field, SQLModel

from .base import utc_now
from .domain import Domain
|
class AICallLog(SQLModel, table=True):
    """Audit-log row for a single LLM API call.

    Persists the prompt/response pair, token accounting, timing, the
    tool-call trace, validation results (Phase 1), and the model's
    captured reasoning chain (Phase 2) for observability.
    """

    __tablename__ = "ai_call_logs"
    # Infrastructure/observability table — not owned by any business domain.
    __domains__: ClassVar[frozenset[Domain]] = frozenset()

    id: UUID = Field(default_factory=uuid4, primary_key=True)
    created_at: datetime = Field(default_factory=utc_now, nullable=False)

    # Call identity / routing.
    endpoint: str = Field(index=True)  # logical endpoint/operation name; indexed for filtering
    model: str  # LLM model identifier used for this call

    # Prompt/response payloads. Nullable so a failed call can still be logged.
    system_prompt: str | None = Field(default=None)
    user_input: str | None = Field(default=None)
    response_text: str | None = Field(default=None)

    # Token accounting as reported by the provider (None when unavailable).
    prompt_tokens: int | None = Field(default=None)
    completion_tokens: int | None = Field(default=None)
    total_tokens: int | None = Field(default=None)

    # Timing and termination.
    duration_ms: int | None = Field(default=None)
    finish_reason: str | None = Field(default=None)  # provider finish reason, e.g. "stop", "MAX_TOKENS"

    # Structured trace of tool/function calls made during this LLM call,
    # stored as a JSON column rather than a native SQLModel type.
    tool_trace: dict[str, Any] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )

    success: bool = Field(default=True, index=True)  # indexed so failures can be queried quickly
    error_detail: str | None = Field(default=None)

    # Validation fields (Phase 1) — results of post-response validation,
    # persisted as JSON arrays of human-readable messages.
    validation_errors: list[str] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )
    validation_warnings: list[str] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )
    auto_fixed: bool = Field(default=False)  # True when validation issues were auto-corrected

    # Reasoning capture (Phase 2)
    reasoning_chain: str | None = Field(
        default=None,
        description="LLM reasoning/thinking process (MEDIUM thinking level)",
    )