from datetime import datetime
from typing import Any, ClassVar
from uuid import UUID, uuid4

from sqlalchemy import JSON, Column
from sqlmodel import Field, SQLModel

from .base import utc_now
from .domain import Domain


class AICallLog(SQLModel, table=True):
    """Persistence model for one logged AI/LLM API call (table ``ai_call_logs``).

    Records the request side (endpoint, model, prompts), the response text,
    token accounting, timing, success/error state, validation results
    (Phase 1), captured reasoning (Phase 2), and the Gemini API's detailed
    token breakdown (Phase 2).
    """

    __tablename__ = "ai_call_logs"
    # NOTE(review): empty domain set — presumably this model belongs to no
    # specific domain; confirm against wherever __domains__ is consumed.
    __domains__: ClassVar[frozenset[Domain]] = frozenset()

    # Surrogate primary key, generated client-side at insert time.
    id: UUID = Field(default_factory=uuid4, primary_key=True)
    # Row creation timestamp via utc_now from .base — presumably
    # timezone-aware UTC; confirm in base module.
    created_at: datetime = Field(default_factory=utc_now, nullable=False)

    # API endpoint that was called; indexed for per-endpoint queries.
    endpoint: str = Field(index=True)
    # Model identifier used for the call.
    model: str

    # Request/response payloads — all optional, since a failed or partial
    # call may not produce every piece.
    system_prompt: str | None = Field(default=None)
    user_input: str | None = Field(default=None)
    response_text: str | None = Field(default=None)

    # Token accounting and call duration (milliseconds).
    prompt_tokens: int | None = Field(default=None)
    completion_tokens: int | None = Field(default=None)
    total_tokens: int | None = Field(default=None)
    duration_ms: int | None = Field(default=None)
    # Provider-reported reason generation stopped (e.g. stop/length).
    finish_reason: str | None = Field(default=None)

    # Trace of tool/function-call activity, persisted as a JSON column.
    tool_trace: dict[str, Any] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )

    # Outcome flags; success is indexed so failures can be filtered cheaply.
    success: bool = Field(default=True, index=True)
    error_detail: str | None = Field(default=None)

    # Validation fields (Phase 1)
    # Lists of validation messages, persisted as JSON columns.
    validation_errors: list[str] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )
    validation_warnings: list[str] | None = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
    )
    # Presumably set True when validation issues were auto-corrected —
    # confirm with the code that writes this row.
    auto_fixed: bool = Field(default=False)

    # Reasoning capture (Phase 2)
    reasoning_chain: str | None = Field(
        default=None,
        description="LLM reasoning/thinking process (MEDIUM thinking level)",
    )

    # Enhanced token metrics (Phase 2 - Gemini API detailed breakdown)
    thoughts_tokens: int | None = Field(
        default=None,
        description="Thinking tokens (thoughtsTokenCount) - separate from output budget",
    )
    tool_use_prompt_tokens: int | None = Field(
        default=None,
        description="Tool use prompt tokens (toolUsePromptTokenCount)",
    )
    cached_content_tokens: int | None = Field(
        default=None,
        description="Cached content tokens (cachedContentTokenCount)",
    )