toxic-api/app/api/routes.py
"""
API Routes
==========
FastAPI routes (Interface Segregation)
"""
from fastapi import APIRouter, Depends, HTTPException, status
from typing import Dict
from app.schemas.requests import AnalysisRequest
from app.schemas.responses import AnalysisResponse, HealthResponse, ErrorResponse
from app.services.analysis_service import analysis_service
from app.models.model_loader import model_loader
from app.core.config import settings
from app.core.exceptions import ModelNotLoadedException, AnalysisException
router = APIRouter()

@router.get(
    "/",
    response_model=Dict[str, str],
    summary="Root endpoint",
    tags=["General"]
)
async def root():
    """Root endpoint - API information"""
    return {
        "message": "Toxic Text Detection API",
        "version": settings.API_VERSION,
        "docs": "/docs",
        "health": "/api/v1/health"
    }
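
# NOTE: the "health" link above assumes this router is included with an
# "/api/v1" prefix when the FastAPI application is created, e.g. (illustrative,
# defined outside this module):
#     app.include_router(router, prefix="/api/v1")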

@router.get(
    "/health",
    response_model=HealthResponse,
    summary="Health check",
    tags=["General"]
)
async def health_check():
    """
    Health check endpoint

    Returns service status and model information
    """
    return HealthResponse(
        status="healthy" if model_loader.is_loaded() else "unhealthy",
        model_loaded=model_loader.is_loaded(),
        device=str(model_loader.device) if model_loader.is_loaded() else "unknown",
        model_name=settings.MODEL_NAME,
        version=settings.API_VERSION
    )
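
# Example /health response body (illustrative only; exact values depend on the
# runtime device and on the HealthResponse schema in app/schemas/responses.py):
# {
#     "status": "healthy",
#     "model_loaded": true,
#     "device": "cuda:0",
#     "model_name": "<settings.MODEL_NAME>",
#     "version": "<settings.API_VERSION>"
# }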

@router.post(
    "/analyze",
    response_model=AnalysisResponse,
    responses={
        200: {"description": "Analysis successful"},
        400: {"model": ErrorResponse, "description": "Invalid input"},
        500: {"model": ErrorResponse, "description": "Analysis failed"},
        503: {"model": ErrorResponse, "description": "Model not loaded"}
    },
    summary="Analyze text for toxicity",
    tags=["Analysis"]
)
async def analyze_text(request: AnalysisRequest):
    """
    Analyze text for toxic content

    This endpoint analyzes Vietnamese text to detect toxic content using
    a fine-tuned PhoBERT model with gradient-based explainability.

    **Features:**
    - Sentence-level toxicity detection
    - Word-level importance scores
    - HTML highlighting of toxic content
    - Detailed statistics

    **Parameters:**
    - **text**: Text to analyze (required)
    - **include_html**: Include HTML highlighting (default: true)
    - **include_word_scores**: Include word-level scores (default: true)
    - **include_summary_table**: Include summary table (default: false)

    **Returns:**
    - Overall toxicity label (toxic/clean)
    - Sentence-level analysis
    - Word-level scores and toxic words summary
    - HTML with highlighted toxic content
    - Statistical information
    """
    # Check if model is loaded
    if not model_loader.is_loaded():
        raise ModelNotLoadedException()

    # Perform analysis
    try:
        result = analysis_service.analyze(request)
        return result
    except AnalysisException as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        ) from e
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Unexpected error: {str(e)}"
        ) from e
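
# ---------------------------------------------------------------------------
# Illustrative client call (a minimal sketch, not part of the route
# definitions). It assumes the API runs locally on port 8000 and that this
# router is mounted under the "/api/v1" prefix implied by the root endpoint's
# "health" link; the request fields mirror the documented /analyze parameters.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import json
    import urllib.request

    payload = json.dumps({
        "text": "Văn bản tiếng Việt cần phân tích",  # placeholder input text
        "include_html": True,
        "include_word_scores": True,
        "include_summary_table": False,
    }).encode("utf-8")

    req = urllib.request.Request(
        "http://localhost:8000/api/v1/analyze",  # assumed host, port and prefix
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        # Print the raw AnalysisResponse JSON; see app/schemas/responses.py
        # for the exact field layout.
        body = json.loads(resp.read().decode("utf-8"))
        print(json.dumps(body, ensure_ascii=False, indent=2))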