feat(routines): add minoxidil beard/mustache option to routine suggestions

- Add include_minoxidil_beard flag to SuggestRoutineRequest and SuggestBatchRequest
- Detect minoxidil products by scanning name, brand, INCI and actives; pass them
  to the LLM even though they are medications
- Inject CELE UŻYTKOWNIKA context block into prompts when flag is enabled
- Add _build_objectives_context() returning empty string when flag is off
- Add call_gemini() helper that centralises Gemini API calls and logs every
  request/response to a new ai_call_logs table (AICallLog model + /ai-logs router)
- Nginx: raise client_max_body_size to 16 MB for photo uploads

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Piotr Oleszczyk 2026-03-01 19:46:07 +01:00
parent 3aa03b412b
commit 75ef1bca56
15 changed files with 337 additions and 62 deletions

View file

@ -0,0 +1,51 @@
"""add_ai_call_logs
Revision ID: a1b2c3d4e5f6
Revises: c2d626a2b36c
Create Date: 2026-03-01 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
import sqlmodel.sql.sqltypes
from alembic import op
revision: str = "a1b2c3d4e5f6"
down_revision: Union[str, None] = "c2d626a2b36c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the ai_call_logs audit table plus its two lookup indexes."""
    columns = [
        sa.Column("id", sa.Uuid(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("endpoint", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("model", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("system_prompt", sa.Text(), nullable=True),
        sa.Column("user_input", sa.Text(), nullable=True),
        sa.Column("response_text", sa.Text(), nullable=True),
        sa.Column("prompt_tokens", sa.Integer(), nullable=True),
        sa.Column("completion_tokens", sa.Integer(), nullable=True),
        sa.Column("total_tokens", sa.Integer(), nullable=True),
        sa.Column("duration_ms", sa.Integer(), nullable=True),
        sa.Column("success", sa.Boolean(), nullable=False),
        sa.Column("error_detail", sa.Text(), nullable=True),
    ]
    op.create_table("ai_call_logs", *columns, sa.PrimaryKeyConstraint("id"))
    # Indexes support the /ai-logs filters on endpoint and success.
    for column_name in ("endpoint", "success"):
        op.create_index(
            op.f(f"ix_ai_call_logs_{column_name}"),
            "ai_call_logs",
            [column_name],
            unique=False,
        )
def downgrade() -> None:
    """Reverse of upgrade(): drop both indexes, then the table itself."""
    for index_name in ("ix_ai_call_logs_success", "ix_ai_call_logs_endpoint"):
        op.drop_index(op.f(index_name), table_name="ai_call_logs")
    op.drop_table("ai_call_logs")

View file

@ -0,0 +1,48 @@
from datetime import datetime
from typing import Optional
from uuid import UUID

from fastapi import APIRouter, Depends, HTTPException
from sqlmodel import Session, SQLModel, col, select

from db import get_session
from innercontext.models.ai_log import AICallLog
router = APIRouter()
class AICallLogPublic(SQLModel):
    """List-friendly view of AICallLog: omits the large prompt/response text fields."""

    id: UUID
    # Was annotated `object`, which bypasses pydantic validation and produces an
    # opaque schema in OpenAPI; the underlying column is a timezone-aware DateTime.
    created_at: datetime
    endpoint: str
    model: str
    prompt_tokens: Optional[int] = None
    completion_tokens: Optional[int] = None
    total_tokens: Optional[int] = None
    duration_ms: Optional[int] = None
    success: bool
    error_detail: Optional[str] = None
@router.get("", response_model=list[AICallLogPublic])
def list_ai_logs(
    endpoint: Optional[str] = None,
    success: Optional[bool] = None,
    limit: int = 50,
    session: Session = Depends(get_session),
):
    """List the most recent AI call logs, optionally filtered by endpoint and/or success."""
    query = select(AICallLog)
    if endpoint is not None:
        query = query.where(AICallLog.endpoint == endpoint)
    if success is not None:
        query = query.where(AICallLog.success == success)
    # Newest first; SQLAlchemy compiles WHERE before LIMIT regardless of call order,
    # so this matches the original chain exactly.
    query = query.order_by(col(AICallLog.created_at).desc()).limit(limit)
    return session.exec(query).all()
@router.get("/{log_id}", response_model=AICallLog)
def get_ai_log(log_id: UUID, session: Session = Depends(get_session)):
    """Fetch one AI call log by id — full record, including prompt/response text."""
    record = session.get(AICallLog, log_id)
    if record is not None:
        return record
    raise HTTPException(status_code=404, detail="Log not found")

View file

@ -10,7 +10,7 @@ from sqlmodel import Session, SQLModel, select
from db import get_session
from innercontext.api.utils import get_or_404
from innercontext.llm import get_gemini_client
from innercontext.llm import call_gemini
from innercontext.models import (
Product,
ProductBase,
@ -367,9 +367,8 @@ OUTPUT SCHEMA (all fields optional — omit what you cannot determine):
@router.post("/parse-text", response_model=ProductParseResponse)
def parse_product_text(data: ProductParseRequest) -> ProductParseResponse:
client, model = get_gemini_client()
response = client.models.generate_content(
model=model,
response = call_gemini(
endpoint="products/parse-text",
contents=f"Extract product data from this text:\n\n{data.text}",
config=genai_types.GenerateContentConfig(
system_instruction=_product_parse_system_prompt(),
@ -378,6 +377,7 @@ def parse_product_text(data: ProductParseRequest) -> ProductParseResponse:
max_output_tokens=16384,
temperature=0.0,
),
user_input=data.text,
)
raw = response.text
if not raw:

View file

@ -10,7 +10,7 @@ from sqlmodel import Session, SQLModel, col, select
from db import get_session
from innercontext.api.utils import get_or_404
from innercontext.llm import get_gemini_client
from innercontext.llm import call_gemini
from innercontext.models import (
GroomingSchedule,
Product,
@ -82,6 +82,7 @@ class SuggestRoutineRequest(SQLModel):
routine_date: date
part_of_day: PartOfDay
notes: Optional[str] = None
include_minoxidil_beard: bool = False
class RoutineSuggestion(SQLModel):
@ -93,6 +94,7 @@ class SuggestBatchRequest(SQLModel):
from_date: date
to_date: date
notes: Optional[str] = None
include_minoxidil_beard: bool = False
class DayPlan(SQLModel):
@ -152,6 +154,36 @@ _DAY_NAMES = [
]
def _contains_minoxidil_text(value: Optional[str]) -> bool:
if not value:
return False
text = value.lower()
return "minoxidil" in text or "minoksydyl" in text
def _is_minoxidil_product(product: Product) -> bool:
    """Detect a minoxidil product by scanning name, brand, line, notes, INCI and actives."""
    text_fields = (
        product.name,
        product.brand,
        product.line_name,
        product.usage_notes,
    )
    if any(_contains_minoxidil_text(field) for field in text_fields):
        return True
    if any(_contains_minoxidil_text(entry) for entry in (product.inci or [])):
        return True
    for active in product.actives or []:
        # Actives may be plain dicts or model objects with a .name attribute.
        if isinstance(active, dict):
            candidate = str(active.get("name", ""))
        else:
            candidate = active.name
        if _contains_minoxidil_text(candidate):
            return True
    return False
def _ev(v: object) -> str:
return (
v.value
@ -233,14 +265,12 @@ def _build_recent_history(session: Session) -> str:
def _build_products_context(session: Session, time_filter: Optional[str] = None) -> str:
stmt = (
select(Product)
.where(Product.is_medication == False) # noqa: E712
.where(Product.is_tool == False) # noqa: E712
)
stmt = select(Product).where(Product.is_tool == False) # noqa: E712
products = session.exec(stmt).all()
lines = ["DOSTĘPNE PRODUKTY:"]
for p in products:
if p.is_medication and not _is_minoxidil_product(p):
continue
if time_filter and _ev(p.recommended_time) not in (time_filter, "both"):
continue
ctx = p.to_llm_context()
@ -266,12 +296,24 @@ def _build_products_context(session: Session, time_filter: Optional[str] = None)
return "\n".join(lines) + "\n"
def _build_objectives_context(include_minoxidil_beard: bool) -> str:
if include_minoxidil_beard:
return (
"CELE UŻYTKOWNIKA:\n"
" - Priorytet: poprawa gęstości brody i wąsów\n"
" - Jeśli dostępny produkt z minoksydylem, uwzględnij go zgodnie z zasadami bezpieczeństwa\n"
)
return ""
_RULES = """\
ZASADY:
- Kolejność warstw: cleanser → toner → essence → serum → moisturizer → [SPF dla AM]
- Respektuj incompatible_with (scope: same_step / same_day / same_period)
- Respektuj context_rules (safe_after_shaving, safe_after_acids itp.)
- Respektuj min_interval_hours i max_frequency_per_week
- Jeśli notatki użytkownika mówią o poprawie gęstości brody/wąsów, rozważ minoksydyl (jeśli jest dostępny na liście produktów)
- Dla minoksydylu respektuj usage_notes i ustaw region na obszar zarostu (broda/wąsy), jeśli to adekwatne
- 4–7 kroków na rutynę
- product_id musi być UUID produktu z listy lub null dla czynności pielęgnacyjnych
- action_type: tylko shaving_razor | shaving_oneblade | dermarolling (lub null)
@ -344,13 +386,12 @@ def suggest_routine(
data: SuggestRoutineRequest,
session: Session = Depends(get_session),
):
client, model = get_gemini_client()
weekday = data.routine_date.weekday()
skin_ctx = _build_skin_context(session)
grooming_ctx = _build_grooming_context(session, weekdays=[weekday])
history_ctx = _build_recent_history(session)
products_ctx = _build_products_context(session, time_filter=data.part_of_day.value)
objectives_ctx = _build_objectives_context(data.include_minoxidil_beard)
notes_line = f"\nKONTEKST OD UŻYTKOWNIKA: {data.notes}\n" if data.notes else ""
day_name = _DAY_NAMES[weekday]
@ -358,23 +399,21 @@ def suggest_routine(
prompt = (
f"Zaproponuj rutynę pielęgnacyjną {data.part_of_day.value.upper()} "
f"na {data.routine_date} ({day_name}).\n\n"
f"{skin_ctx}\n{grooming_ctx}\n{history_ctx}\n{products_ctx}\n{_RULES}{notes_line}"
f"{skin_ctx}\n{grooming_ctx}\n{history_ctx}\n{products_ctx}\n{objectives_ctx}\n{_RULES}{notes_line}"
"\nZwróć JSON zgodny ze schematem."
)
try:
response = client.models.generate_content(
model=model,
contents=prompt,
config=genai_types.GenerateContentConfig(
response_mime_type="application/json",
response_schema=_SuggestionOut,
max_output_tokens=4096,
temperature=0.4,
),
)
except Exception as e:
raise HTTPException(status_code=502, detail=f"Gemini API error: {e}")
response = call_gemini(
endpoint="routines/suggest",
contents=prompt,
config=genai_types.GenerateContentConfig(
response_mime_type="application/json",
response_schema=_SuggestionOut,
max_output_tokens=4096,
temperature=0.4,
),
user_input=prompt,
)
raw = response.text
if not raw:
@ -411,8 +450,6 @@ def suggest_batch(
if data.from_date > data.to_date:
raise HTTPException(status_code=400, detail="from_date must be <= to_date.")
client, model = get_gemini_client()
weekdays = list(
{(data.from_date + timedelta(days=i)).weekday() for i in range(delta)}
)
@ -420,6 +457,7 @@ def suggest_batch(
grooming_ctx = _build_grooming_context(session, weekdays=weekdays)
history_ctx = _build_recent_history(session)
products_ctx = _build_products_context(session)
objectives_ctx = _build_objectives_context(data.include_minoxidil_beard)
date_range_lines = []
for i in range(delta):
@ -431,7 +469,7 @@ def suggest_batch(
prompt = (
f"Zaproponuj plan pielęgnacji AM + PM dla każdego dnia z zakresu:\n{dates_str}\n\n"
f"{skin_ctx}\n{grooming_ctx}\n{history_ctx}\n{products_ctx}\n{_RULES}{notes_line}"
f"{skin_ctx}\n{grooming_ctx}\n{history_ctx}\n{products_ctx}\n{objectives_ctx}\n{_RULES}{notes_line}"
"\nDodatkowe zasady dla planu wielodniowego:\n"
" - Retinol/retinoidy: przestrzegaj max_frequency_per_week i min_interval_hours między użyciami\n"
" - Nie stosuj kwasów i retinoidów tego samego dnia\n"
@ -441,19 +479,17 @@ def suggest_batch(
"\nZwróć JSON zgodny ze schematem."
)
try:
response = client.models.generate_content(
model=model,
contents=prompt,
config=genai_types.GenerateContentConfig(
response_mime_type="application/json",
response_schema=_BatchOut,
max_output_tokens=8192,
temperature=0.4,
),
)
except Exception as e:
raise HTTPException(status_code=502, detail=f"Gemini API error: {e}")
response = call_gemini(
endpoint="routines/suggest-batch",
contents=prompt,
config=genai_types.GenerateContentConfig(
response_mime_type="application/json",
response_schema=_BatchOut,
max_output_tokens=8192,
temperature=0.4,
),
user_input=prompt,
)
raw = response.text
if not raw:

View file

@ -11,7 +11,7 @@ from sqlmodel import Session, SQLModel, select
from db import get_session
from innercontext.api.utils import get_or_404
from innercontext.llm import get_gemini_client
from innercontext.llm import call_gemini
from innercontext.models import (
SkinConditionSnapshot,
SkinConditionSnapshotBase,
@ -140,8 +140,6 @@ async def analyze_skin_photos(
if not (1 <= len(photos) <= 3):
raise HTTPException(status_code=422, detail="Send between 1 and 3 photos.")
client, model = get_gemini_client()
allowed = {"image/jpeg", "image/png", "image/webp"}
parts: list[genai_types.Part] = []
for photo in photos:
@ -163,20 +161,21 @@ async def analyze_skin_photos(
)
)
try:
response = client.models.generate_content(
model=model,
contents=parts,
config=genai_types.GenerateContentConfig(
system_instruction=_skin_photo_system_prompt(),
response_mime_type="application/json",
response_schema=_SkinAnalysisOut,
max_output_tokens=2048,
temperature=0.0,
),
)
except Exception as e:
raise HTTPException(status_code=502, detail=f"Gemini API error: {e}")
image_summary = (
f"{len(photos)} image(s): {', '.join(p.content_type for p in photos)}"
)
response = call_gemini(
endpoint="skincare/analyze-photos",
contents=parts,
config=genai_types.GenerateContentConfig(
system_instruction=_skin_photo_system_prompt(),
response_mime_type="application/json",
response_schema=_SkinAnalysisOut,
max_output_tokens=2048,
temperature=0.0,
),
user_input=image_summary,
)
try:
parsed = json.loads(response.text)

View file

@ -1,9 +1,12 @@
"""Shared helpers for Gemini API access."""
import os
import time
from contextlib import suppress
from fastapi import HTTPException
from google import genai
from google.genai import types as genai_types
_DEFAULT_MODEL = "gemini-flash-latest"
@ -18,3 +21,70 @@ def get_gemini_client() -> tuple[genai.Client, str]:
raise HTTPException(status_code=503, detail="GEMINI_API_KEY not configured")
model = os.environ.get("GEMINI_MODEL", _DEFAULT_MODEL)
return genai.Client(api_key=api_key), model
def call_gemini(
    *,
    endpoint: str,
    contents,
    config: genai_types.GenerateContentConfig,
    user_input: str | None = None,
):
    """Call Gemini, log the full request + response to ai_call_logs, return the response.

    Args:
        endpoint: Logical caller name (e.g. "routines/suggest") stored in the log.
        contents: Payload forwarded verbatim to generate_content (string, parts, ...).
        config: Generation config; its system_instruction is captured in the log.
        user_input: Human-readable form of the request for the log. Defaults to
            str(contents) on a best-effort basis.

    Raises:
        HTTPException: 502 wrapping any error from the Gemini client. The failed
            attempt is still logged (success=False) before the exception propagates.
    """
    # Deferred imports avoid a circular dependency at module load time.
    from sqlmodel import Session

    from db import engine
    from innercontext.models.ai_log import AICallLog

    client, model = get_gemini_client()

    sys_prompt = None
    if config.system_instruction:
        raw = config.system_instruction
        sys_prompt = raw if isinstance(raw, str) else str(raw)

    if user_input is None:
        # Best effort only: exotic content objects may not stringify cleanly.
        with suppress(Exception):
            user_input = str(contents)

    start = time.monotonic()
    success, error_detail, response = True, None, None
    try:
        response = client.models.generate_content(
            model=model, contents=contents, config=config
        )
    except Exception as exc:
        success = False
        error_detail = str(exc)
        raise HTTPException(status_code=502, detail=f"Gemini API error: {exc}") from exc
    finally:
        duration_ms = int((time.monotonic() - start) * 1000)
        # Logging is best-effort: a DB failure must never mask the API result.
        with suppress(Exception):
            # Single extraction replaces the previous triplicated
            # `response and response.usage_metadata` conditional.
            usage = response.usage_metadata if response else None
            log = AICallLog(
                endpoint=endpoint,
                model=model,
                system_prompt=sys_prompt,
                user_input=user_input,
                response_text=response.text if response else None,
                prompt_tokens=usage.prompt_token_count if usage else None,
                completion_tokens=usage.candidates_token_count if usage else None,
                total_tokens=usage.total_token_count if usage else None,
                duration_ms=duration_ms,
                success=success,
                error_detail=error_detail,
            )
            with Session(engine) as s:
                s.add(log)
                s.commit()
    return response

View file

@ -1,3 +1,4 @@
from .ai_log import AICallLog
from .domain import Domain
from .enums import (
AbsorptionSpeed,
@ -41,6 +42,8 @@ from .skincare import (
)
__all__ = [
# ai logs
"AICallLog",
# domain
"Domain",
# enums

View file

@ -0,0 +1,27 @@
from datetime import datetime
from typing import ClassVar
from uuid import UUID, uuid4
from sqlmodel import Field, SQLModel
from .base import utc_now
from .domain import Domain
class AICallLog(SQLModel, table=True):
    """Audit row for one Gemini API call: request, response, usage and timing."""

    __tablename__ = "ai_call_logs"
    # Internal observability table — belongs to no user-facing domain.
    __domains__: ClassVar[frozenset[Domain]] = frozenset()
    id: UUID = Field(default_factory=uuid4, primary_key=True)
    created_at: datetime = Field(default_factory=utc_now, nullable=False)
    # Logical caller, e.g. "routines/suggest"; indexed for filtering in /ai-logs.
    endpoint: str = Field(index=True)
    # Gemini model name used for the call.
    model: str
    # Full system instruction, when the generation config carried one.
    system_prompt: str | None = Field(default=None)
    # Human-readable request payload (prompt text, or an image summary for photo calls).
    user_input: str | None = Field(default=None)
    # Raw response text; None when the call failed before a response arrived.
    response_text: str | None = Field(default=None)
    # Token accounting copied from the response's usage_metadata; None when absent.
    prompt_tokens: int | None = Field(default=None)
    completion_tokens: int | None = Field(default=None)
    total_tokens: int | None = Field(default=None)
    # Wall-clock duration of the API call in milliseconds.
    duration_ms: int | None = Field(default=None)
    # False when the Gemini call raised; indexed for error-rate queries.
    success: bool = Field(default=True, index=True)
    # Stringified exception detail when success is False.
    error_detail: str | None = Field(default=None)

View file

@ -10,6 +10,7 @@ from fastmcp.utilities.lifespan import combine_lifespans # noqa: E402
from db import create_db_and_tables # noqa: E402
from innercontext.api import ( # noqa: E402
ai_logs,
health,
inventory,
products,
@ -45,6 +46,7 @@ app.include_router(inventory.router, prefix="/inventory", tags=["inventory"])
app.include_router(health.router, prefix="/health", tags=["health"])
app.include_router(routines.router, prefix="/routines", tags=["routines"])
app.include_router(skincare.router, prefix="/skincare", tags=["skincare"])
app.include_router(ai_logs.router, prefix="/ai-logs", tags=["ai-logs"])
app.mount("/mcp", mcp_app)

View file

@ -132,6 +132,8 @@
"suggest_contextLabel": "Additional context for AI",
"suggest_contextOptional": "(optional)",
"suggest_contextPlaceholder": "e.g. party night, focusing on hydration...",
"suggest_minoxidilToggleLabel": "Prioritize beard/mustache density (minoxidil)",
"suggest_minoxidilToggleHint": "When enabled, AI will explicitly consider minoxidil for beard/mustache areas if available.",
"suggest_generateBtn": "Generate suggestion",
"suggest_generating": "Generating…",
"suggest_proposalTitle": "Suggestion",

View file

@ -132,6 +132,8 @@
"suggest_contextLabel": "Dodatkowy kontekst dla AI",
"suggest_contextOptional": "(opcjonalny)",
"suggest_contextPlaceholder": "np. wieczór imprezowy, skupiam się na nawilżeniu...",
"suggest_minoxidilToggleLabel": "Priorytet: gęstość brody/wąsów (minoksydyl)",
"suggest_minoxidilToggleHint": "Po włączeniu AI jawnie uwzględni minoksydyl dla obszaru brody/wąsów, jeśli jest dostępny.",
"suggest_generateBtn": "Generuj propozycję",
"suggest_generating": "Generuję…",
"suggest_proposalTitle": "Propozycja",

View file

@ -141,12 +141,14 @@ export const suggestRoutine = (body: {
routine_date: string;
part_of_day: PartOfDay;
notes?: string;
include_minoxidil_beard?: boolean;
}): Promise<RoutineSuggestion> => api.post('/routines/suggest', body);
// POST /routines/suggest-batch — request an AM+PM plan for each day in the range.
export const suggestBatch = (body: {
  from_date: string;
  to_date: string;
  notes?: string;
  // When true, the backend injects the beard/mustache density objective block.
  include_minoxidil_beard?: boolean;
}): Promise<BatchSuggestion> => api.post('/routines/suggest-batch', body);
export const getGroomingSchedule = (): Promise<GroomingSchedule[]> =>

View file

@ -14,13 +14,19 @@ export const actions: Actions = {
const routine_date = form.get('routine_date') as string;
const part_of_day = form.get('part_of_day') as 'am' | 'pm';
const notes = (form.get('notes') as string) || undefined;
const include_minoxidil_beard = form.get('include_minoxidil_beard') === 'on';
if (!routine_date || !part_of_day) {
return fail(400, { error: 'Data i pora dnia są wymagane.' });
}
try {
const suggestion = await suggestRoutine({ routine_date, part_of_day, notes });
const suggestion = await suggestRoutine({
routine_date,
part_of_day,
notes,
include_minoxidil_beard
});
return { suggestion, routine_date, part_of_day };
} catch (e) {
return fail(502, { error: (e as Error).message });
@ -32,6 +38,7 @@ export const actions: Actions = {
const from_date = form.get('from_date') as string;
const to_date = form.get('to_date') as string;
const notes = (form.get('notes') as string) || undefined;
const include_minoxidil_beard = form.get('include_minoxidil_beard') === 'on';
if (!from_date || !to_date) {
return fail(400, { error: 'Daty początkowa i końcowa są wymagane.' });
@ -44,7 +51,7 @@ export const actions: Actions = {
}
try {
const batch = await suggestBatch({ from_date, to_date, notes });
const batch = await suggestBatch({ from_date, to_date, notes, include_minoxidil_beard });
return { batch, from_date, to_date };
} catch (e) {
return fail(502, { error: (e as Error).message });

View file

@ -1,5 +1,6 @@
<script lang="ts">
import { enhance } from '$app/forms';
import { resolve } from '$app/paths';
import { SvelteSet } from 'svelte/reactivity';
import type { ActionData, PageData } from './$types';
import type { BatchSuggestion, RoutineSuggestion, SuggestedStep } from '$lib/types';
@ -104,7 +105,7 @@
<div class="max-w-2xl space-y-6">
<div class="flex items-center gap-4">
<a href="/routines" class="text-sm text-muted-foreground hover:underline">{m["suggest_backToRoutines"]()}</a>
<a href={resolve('/routines')} class="text-sm text-muted-foreground hover:underline">{m["suggest_backToRoutines"]()}</a>
<h2 class="text-2xl font-bold tracking-tight">{m.suggest_title()}</h2>
</div>
@ -152,6 +153,18 @@
class="w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring resize-none"
></textarea>
</div>
<div class="flex items-start gap-3 rounded-md border border-border px-3 py-2">
<input
id="single_include_minoxidil_beard"
name="include_minoxidil_beard"
type="checkbox"
class="mt-0.5 h-4 w-4 rounded border-input"
/>
<div class="space-y-0.5">
<Label for="single_include_minoxidil_beard" class="font-medium">{m["suggest_minoxidilToggleLabel"]()}</Label>
<p class="text-xs text-muted-foreground">{m["suggest_minoxidilToggleHint"]()}</p>
</div>
</div>
<Button type="submit" disabled={loadingSingle} class="w-full">
{#if loadingSingle}
@ -247,6 +260,18 @@
class="w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring resize-none"
></textarea>
</div>
<div class="flex items-start gap-3 rounded-md border border-border px-3 py-2">
<input
id="batch_include_minoxidil_beard"
name="include_minoxidil_beard"
type="checkbox"
class="mt-0.5 h-4 w-4 rounded border-input"
/>
<div class="space-y-0.5">
<Label for="batch_include_minoxidil_beard" class="font-medium">{m["suggest_minoxidilToggleLabel"]()}</Label>
<p class="text-xs text-muted-foreground">{m["suggest_minoxidilToggleHint"]()}</p>
</div>
</div>
<Button type="submit" disabled={loadingBatch} class="w-full">
{#if loadingBatch}

View file

@ -4,6 +4,7 @@ server {
# FastAPI backend — strip /api/ prefix
location /api/ {
client_max_body_size 16m; # up to 3 × 5 MB photos
proxy_pass http://127.0.0.1:8000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;