use gpt-5-nano as se guardrail
app.py CHANGED
@@ -736,7 +736,7 @@ with gr.Blocks() as app:
 
     def guardrail_check_se_relevance(user_input):
         """
-        Use gpt-
+        Use gpt-5-nano to check if the user input is SE-related.
        Return True if it is SE-related, otherwise False.
        """
        # Example instructions for classification — adjust to your needs
@@ -753,7 +753,7 @@ with gr.Blocks() as app:
        try:
            # Make the chat completion call
            response = openai_client.chat.completions.create(
-                model="gpt-
+                model="gpt-5-nano", messages=[system_message, user_message]
            )
            classification = response.choices[0].message.content.strip().lower()
            # Check if the LLM responded with 'Yes'
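
The hunks above only show fragments of the guardrail. For context, here is a minimal sketch of what the full function might look like after this commit, assuming openai_client is an openai.OpenAI() instance and using illustrative system/user prompts (the exact instruction text and error handling in app.py are not shown in this diff):

import openai

# Assumed setup: the client reads OPENAI_API_KEY from the environment.
openai_client = openai.OpenAI()

def guardrail_check_se_relevance(user_input):
    """
    Use gpt-5-nano to check if the user input is SE-related.
    Return True if it is SE-related, otherwise False.
    """
    # Illustrative classification instructions; the real prompt in app.py may differ.
    system_message = {
        "role": "system",
        "content": (
            "You are a classifier. Answer with a single word, 'Yes' or 'No': "
            "is the user's question about software engineering?"
        ),
    }
    user_message = {"role": "user", "content": user_input}

    try:
        # Make the chat completion call
        response = openai_client.chat.completions.create(
            model="gpt-5-nano", messages=[system_message, user_message]
        )
        classification = response.choices[0].message.content.strip().lower()
        # Check if the LLM responded with 'Yes'
        return classification.startswith("yes")
    except Exception:
        # Assumed behavior: fail open on API errors so the chat keeps working.
        return True

Whether to fail open or closed on API errors is a design choice; failing open keeps the Space usable when the classification call is unavailable, at the cost of occasionally letting off-topic questions through.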