Spaces:
Sleeping
Sleeping
File size: 1,062 Bytes
a578549 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
import torch
app = FastAPI()
print("Loading model... this may take a minute.")
# Load the specific model requested
model_name = "protectai/deberta-v3-base-prompt-injection-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Create the pipeline
classifier = pipeline(
"text-classification",
model=model,
tokenizer=tokenizer,
truncation=True,
max_length=512 # Ensure long prompts don't crash it
)
class PromptRequest(BaseModel):
prompt: str
@app.post("/analyze")
async def analyze_prompt(request: PromptRequest):
# Run the model
# The model returns labels like 'SAFE' or 'INJECTION'
result = classifier(request.prompt)
# Example output: [{'label': 'INJECTION', 'score': 0.998}]
data = result[0]
return {
"label": data['label'],
"score": data['score']
} |