import streamlit as st
import requests
import os
import re

# Set page title and layout
st.set_page_config(page_title="Super Prompt Generator", layout="wide")

# API key from environment variable
API_KEY = os.environ.get("NEBIUS_API_KEY")
if not API_KEY:
    st.error("API key not found. Please set the `NEBIUS_API_KEY` environment variable.")
    st.stop()  # Halt here; the app cannot call the API without a key

# Function to call the Nebius API
def generate_response(prompt, api_key):
    api_url = "https://api.studio.nebius.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {api_key}"}
    payload = {
        "model": "openai/gpt-oss-120b",
        "messages": [
| {"role": "system", "content": """You are a Prompt Generator designed to create task-specific prompts for various user requests. Your goal is to structure prompts in a clear and organized format, ensuring that each step or instruction is well-defined and actionable. | |
| Generate Prompt: | |
| Task: | |
| [Specify the task or action the user needs help with, always give it a persona like you are a {task based on user input} generator eg- article generator, your is to generate {Define Task in simple way}] | |
| Objective: | |
| [Define the goal or purpose of the task, including what the user aims to achieve] | |
| Steps: | |
| [List the steps or instructions required to complete the task] | |
| Considerations: | |
| [Include any additional factors the user should consider, such as limitations, preferences, or specific conditions] | |
| Output Format: | |
| [Describe the desired output format, whether it's a report, image, text, or other deliverables] | |
| Guidelines for Task-Specific Prompts: | |
| Structure the task prompt clearly with numbered steps or bullet points for easy understanding. | |
| Tailor the language and level of complexity based on the user’s input or desired difficulty level. | |
| Ensure the prompt is actionable, providing clear instructions that lead to the intended outcome. Don't write anything right now wait for my command."""}, | |
| {"role": "user", "content": prompt} | |
| ], | |
| "temperature": 0.6, | |
| "max_tokens": 750, | |
| "top_p": 0.9, | |
| "top_k": 50 | |
| } | |
    response = requests.post(api_url, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        st.error(f"Error: {response.status_code}, {response.text}")
        return None

# Custom CSS for centering the title
st.markdown(
    """
    <style>
    .title-container {
        text-align: center;
        margin-bottom: 20px;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# Input bar for the user prompt
user_input = st.text_area(
    label="System Prompt Generator",
    placeholder="Type or Paste Your Input..."
)

if st.button("Generate", use_container_width=True):
    if user_input.strip():
        with st.spinner("Generating... Please wait!"):
            result = generate_response(user_input, API_KEY)
            if result:
                try:
                    # Extract the generated text from the API response
                    assistant_message = result["choices"][0]["message"]["content"]
                    # Strip Markdown emphasis markers (** and __) for plain-text display
                    cleaned_message = re.sub(r"\*\*|__", "", assistant_message)
                    # Show the cleaned output in a plain-text code block
                    st.code(cleaned_message, language="text")
                except (KeyError, IndexError) as e:
                    st.error(f"Unexpected response format: {e}")
    else:
        st.warning("Please provide input before clicking Generate.")
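
For a quick sanity check outside Streamlit, a minimal test script along these lines can exercise the same endpoint and response shape. This is an illustrative sketch, not part of the Space: it assumes NEBIUS_API_KEY is already exported in your shell, and the script name and test prompt are made up.

# test_nebius.py -- illustrative sketch, not part of the app
import os
import requests

api_key = os.environ["NEBIUS_API_KEY"]  # assumes the key is already set
payload = {
    "model": "openai/gpt-oss-120b",
    "messages": [{"role": "user", "content": "Prompt for writing a product description"}],
    "temperature": 0.6,
    "max_tokens": 200,
}
resp = requests.post(
    "https://api.studio.nebius.ai/v1/chat/completions",
    headers={"Authorization": f"Bearer {api_key}"},
    json=payload,
    timeout=60,
)
resp.raise_for_status()  # surface HTTP errors instead of parsing a bad body
# Same shape the app reads: choices[0].message.content
print(resp.json()["choices"][0]["message"]["content"])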