| """ | |
| API Routes | |
| ========== | |
| FastAPI routes (Interface Segregation) | |
| """ | |
from fastapi import APIRouter, Depends, HTTPException, status
from typing import Dict

from app.schemas.requests import AnalysisRequest
from app.schemas.responses import AnalysisResponse, HealthResponse, ErrorResponse
from app.services.analysis_service import analysis_service
from app.models.model_loader import model_loader
from app.core.config import settings
from app.core.exceptions import ModelNotLoadedException, AnalysisException

router = APIRouter()
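
# This router is presumably mounted under the /api/v1 prefix referenced by
# the root endpoint below; a minimal sketch, assuming a conventional
# app/main.py layout (module path and prefix are assumptions):
#
#   from fastapi import FastAPI
#   from app.api.routes import router
#
#   app = FastAPI()
#   app.include_router(router, prefix="/api/v1")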

@router.get("/")  # path assumed
async def root() -> Dict:
    """Root endpoint - API information."""
    return {
        "message": "Toxic Text Detection API",
        "version": settings.API_VERSION,
        "docs": "/docs",
        "health": "/api/v1/health",
    }

@router.get("/health", response_model=HealthResponse)
async def health_check():
    """
    Health check endpoint.

    Returns service status and model information.
    """
    return HealthResponse(
        status="healthy" if model_loader.is_loaded() else "unhealthy",
        model_loaded=model_loader.is_loaded(),
        device=str(model_loader.device) if model_loader.is_loaded() else "unknown",
        model_name=settings.MODEL_NAME,
        version=settings.API_VERSION,
    )
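
# A minimal client-side check, assuming the service runs locally under the
# /api/v1 prefix (host and prefix are assumptions):
#
#   import requests
#   resp = requests.get("http://localhost:8000/api/v1/health")
#   print(resp.json())
#   # e.g. {"status": "healthy", "model_loaded": True, "device": "cuda:0", ...}
#   # ("cuda:0" is illustrative; the device depends on the deployment)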

@router.post("/analyze", response_model=AnalysisResponse)  # path assumed
async def analyze_text(request: AnalysisRequest):
    """
    Analyze text for toxic content.

    This endpoint analyzes Vietnamese text to detect toxic content using
    a fine-tuned PhoBERT model with gradient-based explainability.

    **Features:**
    - Sentence-level toxicity detection
    - Word-level importance scores
    - HTML highlighting of toxic content
    - Detailed statistics

    **Parameters:**
    - **text**: Text to analyze (required)
    - **include_html**: Include HTML highlighting (default: true)
    - **include_word_scores**: Include word-level scores (default: true)
    - **include_summary_table**: Include summary table (default: false)

    **Returns:**
    - Overall toxicity label (toxic/clean)
    - Sentence-level analysis
    - Word-level scores and a summary of toxic words
    - HTML with highlighted toxic content
    - Statistical information
    """
    # Fail fast if the model was not loaded at startup
    if not model_loader.is_loaded():
        raise ModelNotLoadedException()

    # Run the analysis, mapping service-level failures to HTTP 500s
    try:
        result = analysis_service.analyze(request)
        return result
    except AnalysisException as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e),
        )
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Unexpected error: {e}",
        )
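
# A minimal client-side call, assuming the service runs locally and the
# /analyze path above (host and path are assumptions); the request fields
# mirror the docstring's parameters:
#
#   import requests
#   payload = {"text": "xin chào", "include_html": True}
#   resp = requests.post("http://localhost:8000/api/v1/analyze", json=payload)
#   data = resp.json()
#   # data follows AnalysisResponse: an overall toxic/clean label,
#   # per-sentence analysis, word-level scores, and optional highlighted HTML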