# /opt/docker/dev/service_finder/backend/app/services/ai_service.py

# Standard library
import asyncio
import json
import logging
import os
from typing import Any, Dict, List, Optional

# Third-party
import httpx
from sqlalchemy import select

# Application-local
from app.db.session import AsyncSessionLocal
from app.models.system import SystemParameter
from app.services.config_service import config  # centralized config service (2.2)

logger = logging.getLogger("AI-Service-2.2")

class AIService:
    """
    Sentinel Master AI Service 2.2.

    Responsible for LLM calls, prompt-template management and OCR
    processing.  Every operational parameter (model name, endpoint URL,
    prompt template, temperature, timeout) is admin-controlled and
    resolved at call time through ``config.get_setting``.
    """

    @classmethod
    def _render_prompt(cls, template: str, **values: Any) -> Optional[str]:
        """
        Safely substitute placeholders into an admin-editable template.

        Templates are edited in the admin UI, so they may contain
        unexpected ``{placeholders}`` or stray braces; a bare
        ``str.format`` would then raise and escape to the caller even
        though every public method is typed ``Optional``.

        Returns:
            The rendered prompt, or ``None`` if the template could not
            be formatted (the error is logged).
        """
        try:
            return template.format(**values)
        except (KeyError, IndexError, ValueError) as exc:
            logger.error("❌ Prompt sablon formázási hiba: %s", exc)
            return None

    @classmethod
    async def _execute_ai_call(cls, db, prompt: str, model_key: str = "text",
                               images: Optional[List[str]] = None) -> Optional[Dict[str, Any]]:
        """
        Central AI executor: resolves admin config, throttles, posts the
        request to the Ollama generate endpoint and parses the JSON reply.

        Args:
            db: Open async DB session used for config lookups.
            prompt: Fully rendered prompt text.
            model_key: Selects the model setting (``"text"`` or ``"vision"``).
            images: Optional base64-encoded images for vision models.

        Returns:
            Parsed JSON dict from the model, or ``None`` on any failure
            (network, HTTP status, JSON parsing).
        """
        try:
            # 1. Fetch admin-controlled configuration.
            base_url = await config.get_setting(db, "ai_ollama_url", default="http://ollama:11434/api/generate")
            delay = await config.get_setting(db, "AI_REQUEST_DELAY", default=0.1)

            # Model selection (text or vision).
            model_name = await config.get_setting(db, f"ai_model_{model_key}", default="qwen2.5-coder:32b")
            temp = await config.get_setting(db, "ai_temperature", default=0.1)
            timeout_val = await config.get_setting(db, "ai_timeout", default=120.0)

            # Simple throttle between consecutive model calls.
            await asyncio.sleep(float(delay))

            # 2. Assemble the Ollama payload.
            payload: Dict[str, Any] = {
                "model": model_name,
                "prompt": prompt,
                "stream": False,
                "format": "json",  # ask Ollama for strict JSON output
                "options": {"temperature": float(temp)},
            }
            if images:  # Llava/vision support
                payload["images"] = images

            # 3. HTTP call.
            async with httpx.AsyncClient(timeout=float(timeout_val)) as client:
                response = await client.post(base_url, json=payload)
                response.raise_for_status()
                raw_res = response.json().get("response", "{}")
                return json.loads(raw_res)

        except json.JSONDecodeError as je:
            logger.error("❌ AI JSON hiba (parszolási hiba): %s", je)
            return None
        except Exception as e:
            # logger.exception keeps the traceback, which the original
            # f-string logging dropped.
            logger.exception("❌ AI hívás kritikus hiba: %s", e)
            return None

    @classmethod
    async def get_gold_data_from_research(cls, make: str, model: str, raw_context: str) -> Optional[Dict[str, Any]]:
        """
        Robot 3 (Alchemist) enrichment step.

        Turns raw research text into a clean technical data sheet for
        *make*/*model*.  Returns ``None`` on template or AI failure.
        """
        async with AsyncSessionLocal() as db:
            template = await config.get_setting(
                db, "ai_prompt_gold_data",
                default="Extract technical car data for {make} {model} from: {context}")

            full_prompt = cls._render_prompt(template, make=make, model=model, context=raw_context)
            if full_prompt is None:
                return None
            return await cls._execute_ai_call(db, full_prompt, model_key="text")

    @classmethod
    async def get_clean_vehicle_data(cls, make: str, raw_model: str, sources: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Model-name normalization and synonym collection.

        Returns ``None`` on template or AI failure.
        """
        async with AsyncSessionLocal() as db:
            template = await config.get_setting(
                db, "ai_prompt_normalization",
                default="Normalize car model names: {make} {model}. Sources: {sources}")

            full_prompt = cls._render_prompt(template, make=make, model=raw_model, sources=json.dumps(sources))
            if full_prompt is None:
                return None
            return await cls._execute_ai_call(db, full_prompt, model_key="text")

    @classmethod
    async def process_ocr_document(cls, doc_type: str, base64_image: str) -> Optional[Dict[str, Any]]:
        """
        Robot 1 (OCR) vision step.

        Sends a base64-encoded image to the vision model (e.g. Llava)
        with a separate prompt template per document type (invoice,
        registration document, sales contract).  Returns ``None`` on
        template or AI failure.
        """
        async with AsyncSessionLocal() as db:
            template = await config.get_setting(
                db, f"ai_prompt_ocr_{doc_type}",
                default="Analyze this {doc_type} image and return structured JSON data.")

            full_prompt = cls._render_prompt(template, doc_type=doc_type)
            if full_prompt is None:
                return None
            return await cls._execute_ai_call(db, full_prompt, model_key="vision", images=[base64_image])