toxic-api/app/core/config.py
"""
Core Configuration
==================
Application settings using Pydantic Settings
"""
from typing import List

import torch
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    """Application settings"""

    # Model Configuration
    MODEL_NAME: str = "vinai/phobert-base"
    MODEL_PATH: str = "./models/PhoBERTFineTuned_best.pth"
    MAX_LENGTH: int = 128
    DEVICE: str = "cuda" if torch.cuda.is_available() else "cpu"

    # API Configuration
    API_TITLE: str = "Toxic Text Detection API"
    API_VERSION: str = "1.0.0"
    API_DESCRIPTION: str = "Vietnamese toxic text detection with gradient-based explainability"
    API_HOST: str = "0.0.0.0"
    API_PORT: int = 8000
    API_RELOAD: bool = True

    # CORS
    ALLOWED_ORIGINS: List[str] = ["*"]

    # Analysis Settings
    GRADIENT_STEPS: int = 20
    PERCENTILE_THRESHOLD: int = 75
    MIN_WORD_LENGTH: int = 2

    # Logging
    LOG_LEVEL: str = "INFO"

    class Config:
        env_file = ".env"
        case_sensitive = True


# Singleton instance shared across the application
settings = Settings()
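
# Usage sketch (assumed wiring, not from the original file): other modules are
# expected to import the singleton above rather than constructing Settings()
# again. The import path follows the repository layout
# toxic-api/app/core/config.py; the FastAPI setup below is an assumption about
# how the API consumes these values.
#
#     from fastapi import FastAPI
#     from app.core.config import settings
#
#     app = FastAPI(
#         title=settings.API_TITLE,
#         version=settings.API_VERSION,
#         description=settings.API_DESCRIPTION,
#     )
#
# Because of the inner Config class (env_file=".env", case_sensitive=True),
# any field can be overridden through a .env file or environment variables
# whose names match exactly, e.g.:
#
#     API_PORT=9000
#     LOG_LEVEL=DEBUG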