Spaces:
Running
Running
Chandima Prabhath
committed on
Commit
·
a2104ab
1
Parent(s):
6abd01e
Refactor trivia and weather functions; remove trivia-related code and update help text
Browse files- app.py +0 -47
- config.yaml +0 -3
- polLLM.py +22 -16
app.py
CHANGED
|
@@ -101,7 +101,6 @@ client = BotClient(BotConfig)
|
|
| 101 |
# --- Threading, Queues, Stores ---
|
| 102 |
|
| 103 |
task_queue = queue.Queue()
|
| 104 |
-
trivia_store = {}
|
| 105 |
polls = {}
|
| 106 |
last_message_time = time.time()
|
| 107 |
|
|
@@ -169,48 +168,10 @@ def _fn_weather(message_id, chat_id, loc):
|
|
| 169 |
"prompt":f"Speak only this weather report: {report}"
|
| 170 |
})
|
| 171 |
|
| 172 |
-
def _fn_weather_poem(message_id, chat_id, loc):
|
| 173 |
-
raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
|
| 174 |
-
poem = generate_llm(
|
| 175 |
-
f"Write a short, poetic weather summary in Celsius based on:\n\n{raw}"
|
| 176 |
-
)
|
| 177 |
-
client.send_message(message_id, chat_id, poem)
|
| 178 |
-
task_queue.put({
|
| 179 |
-
"type":"audio","message_id":message_id,"chat_id":chat_id,
|
| 180 |
-
"prompt":f"Speak only this poetic weather summary: {poem}"
|
| 181 |
-
})
|
| 182 |
-
|
| 183 |
def _fn_inspire(message_id, chat_id):
|
| 184 |
quote = generate_llm(f"Give me a short inspirational unique quote.")
|
| 185 |
client.send_message(message_id, chat_id, f"✨ {quote}")
|
| 186 |
|
| 187 |
-
def _fn_trivia(message_id, chat_id):
|
| 188 |
-
raw = generate_llm(
|
| 189 |
-
f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
|
| 190 |
-
)
|
| 191 |
-
try:
|
| 192 |
-
obj = json.loads(raw.strip().strip("```json").strip("```"))
|
| 193 |
-
trivia_store[chat_id] = obj
|
| 194 |
-
client.send_message(
|
| 195 |
-
message_id, chat_id,
|
| 196 |
-
f"❓ {obj['question']}\nReply `/answer` or `/answer your guess`."
|
| 197 |
-
)
|
| 198 |
-
except:
|
| 199 |
-
client.send_message(message_id, chat_id, "Failed to generate trivia.")
|
| 200 |
-
|
| 201 |
-
def _fn_answer(message_id, chat_id, guess):
|
| 202 |
-
if chat_id not in trivia_store:
|
| 203 |
-
client.send_message(message_id, chat_id, "No active trivia. `/trivia` to start.")
|
| 204 |
-
return
|
| 205 |
-
qa = trivia_store.pop(chat_id)
|
| 206 |
-
if guess:
|
| 207 |
-
verdict = generate_llm(
|
| 208 |
-
f"Q: {qa['question']}\nCorrect: {qa['answer']}\nUser: {guess}\nCorrect?"
|
| 209 |
-
)
|
| 210 |
-
client.send_message(message_id, chat_id, verdict)
|
| 211 |
-
else:
|
| 212 |
-
client.send_message(message_id, chat_id, f"💡 Answer: {qa['answer']}")
|
| 213 |
-
|
| 214 |
def _fn_meme(message_id, chat_id, txt):
|
| 215 |
client.send_message(message_id, chat_id, "🎨 Generating your meme...")
|
| 216 |
task_queue.put({"type":"image","message_id":message_id,
|
|
@@ -334,9 +295,7 @@ help_text = (
|
|
| 334 |
"• /translate <lang>|<text>\n"
|
| 335 |
"• /joke\n"
|
| 336 |
"• /weather <loc>\n"
|
| 337 |
-
"• /weatherpoem <loc>\n"
|
| 338 |
"• /inspire\n"
|
| 339 |
-
"• /trivia / /answer\n"
|
| 340 |
"• /meme <text>\n"
|
| 341 |
"• /poll <Q>|<opt1>|… / /results / /endpoll\n"
|
| 342 |
"• /gen <prompt>|<count>\n"
|
|
@@ -403,14 +362,8 @@ async def whatsapp_webhook(request: Request):
|
|
| 403 |
_fn_joke(mid, chat_id); return {"success": True}
|
| 404 |
if low.startswith("/weather "):
|
| 405 |
_fn_weather(mid, chat_id, body[9:].strip().replace(" ","+")); return {"success": True}
|
| 406 |
-
if low.startswith("/weatherpoem "):
|
| 407 |
-
_fn_weather_poem(mid, chat_id, body[13:].strip().replace(" ","+")); return {"success": True}
|
| 408 |
if low == "/inspire":
|
| 409 |
_fn_inspire(mid, chat_id); return {"success": True}
|
| 410 |
-
if low == "/trivia":
|
| 411 |
-
_fn_trivia(mid, chat_id); return {"success": True}
|
| 412 |
-
if low.startswith("/answer"):
|
| 413 |
-
_fn_answer(mid, chat_id, body[7:].strip()); return {"success": True}
|
| 414 |
if low.startswith("/meme "):
|
| 415 |
_fn_meme(mid, chat_id, body[6:].strip()); return {"success": True}
|
| 416 |
if low.startswith("/poll "):
|
|
|
|
| 101 |
# --- Threading, Queues, Stores ---
|
| 102 |
|
| 103 |
task_queue = queue.Queue()
|
|
|
|
| 104 |
polls = {}
|
| 105 |
last_message_time = time.time()
|
| 106 |
|
|
|
|
| 168 |
"prompt":f"Speak only this weather report: {report}"
|
| 169 |
})
|
| 170 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
def _fn_inspire(message_id, chat_id):
|
| 172 |
quote = generate_llm(f"Give me a short inspirational unique quote.")
|
| 173 |
client.send_message(message_id, chat_id, f"✨ {quote}")
|
| 174 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 175 |
def _fn_meme(message_id, chat_id, txt):
|
| 176 |
client.send_message(message_id, chat_id, "🎨 Generating your meme...")
|
| 177 |
task_queue.put({"type":"image","message_id":message_id,
|
|
|
|
| 295 |
"• /translate <lang>|<text>\n"
|
| 296 |
"• /joke\n"
|
| 297 |
"• /weather <loc>\n"
|
|
|
|
| 298 |
"• /inspire\n"
|
|
|
|
| 299 |
"• /meme <text>\n"
|
| 300 |
"• /poll <Q>|<opt1>|… / /results / /endpoll\n"
|
| 301 |
"• /gen <prompt>|<count>\n"
|
|
|
|
| 362 |
_fn_joke(mid, chat_id); return {"success": True}
|
| 363 |
if low.startswith("/weather "):
|
| 364 |
_fn_weather(mid, chat_id, body[9:].strip().replace(" ","+")); return {"success": True}
|
|
|
|
|
|
|
| 365 |
if low == "/inspire":
|
| 366 |
_fn_inspire(mid, chat_id); return {"success": True}
|
|
|
|
|
|
|
|
|
|
|
|
|
| 367 |
if low.startswith("/meme "):
|
| 368 |
_fn_meme(mid, chat_id, body[6:].strip()); return {"success": True}
|
| 369 |
if low.startswith("/poll "):
|
config.yaml
CHANGED
|
@@ -10,10 +10,7 @@ config:
|
|
| 10 |
• /translate <lang>|<text> – translate text
|
| 11 |
• /joke – tell a short joke
|
| 12 |
• /weather <location> – short, creative weather report in °C
|
| 13 |
-
• /weatherpoem <location> – poetic weather summary in °C
|
| 14 |
• /inspire – inspirational quote
|
| 15 |
-
• /trivia – start a trivia question
|
| 16 |
-
• /answer – answer the current trivia
|
| 17 |
• /meme <text> – generate a meme image
|
| 18 |
• /poll <Q>|<opt1>|<opt2>|… – create a poll
|
| 19 |
• /results – show poll results
|
|
|
|
| 10 |
• /translate <lang>|<text> – translate text
|
| 11 |
• /joke – tell a short joke
|
| 12 |
• /weather <location> – short, creative weather report in °C
|
|
|
|
| 13 |
• /inspire – inspirational quote
|
|
|
|
|
|
|
| 14 |
• /meme <text> – generate a meme image
|
| 15 |
• /poll <Q>|<opt1>|<opt2>|… – create a poll
|
| 16 |
• /results – show poll results
|
polLLM.py
CHANGED
|
@@ -1,12 +1,16 @@
|
|
| 1 |
import os
|
| 2 |
-
import
|
| 3 |
-
import urllib.parse
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from utils import read_config
|
| 6 |
import random
|
| 7 |
|
| 8 |
load_dotenv()
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
def pre_process():
|
| 11 |
# Read the configuration and substitute the character placeholder
|
| 12 |
config = read_config()
|
|
@@ -14,25 +18,27 @@ def pre_process():
|
|
| 14 |
char = config['llm']['char']
|
| 15 |
return system_prompt.replace("{char}", char)
|
| 16 |
|
| 17 |
-
def generate_llm(prompt):
|
| 18 |
system_prompt = pre_process()
|
| 19 |
-
# Encode the user prompt and system prompt for URL safety
|
| 20 |
-
encoded_prompt = urllib.parse.quote(prompt)
|
| 21 |
-
encoded_system = urllib.parse.quote(system_prompt)
|
| 22 |
-
# Build the GET request URL for Pollinations' text API
|
| 23 |
-
randomSeed = random.randint(0, 9999999)
|
| 24 |
-
print(f"DEBUG: Random seed: {randomSeed}")
|
| 25 |
-
url = f"https://text.pollinations.ai/{encoded_prompt}?model=openai-large&private=true&system={encoded_system}&seed={randomSeed}"
|
| 26 |
|
| 27 |
try:
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
except Exception as e:
|
| 33 |
return f"Error: {str(e)}"
|
| 34 |
|
| 35 |
# Example usage (can be removed or commented out in production):
|
| 36 |
if __name__ == "__main__":
|
| 37 |
-
sample_prompt = "
|
| 38 |
-
print("Response:", generate_llm(sample_prompt))
|
|
|
|
| 1 |
import os
|
| 2 |
+
from openai import OpenAI
|
|
|
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
from utils import read_config
|
| 5 |
import random
|
| 6 |
|
| 7 |
load_dotenv()
|
| 8 |
|
| 9 |
+
client = OpenAI(
|
| 10 |
+
base_url="https://text.pollinations.ai/openai",
|
| 11 |
+
api_key="YOUR_API_KEY" # Add if needed
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
def pre_process():
|
| 15 |
# Read the configuration and substitute the character placeholder
|
| 16 |
config = read_config()
|
|
|
|
| 18 |
char = config['llm']['char']
|
| 19 |
return system_prompt.replace("{char}", char)
|
| 20 |
|
| 21 |
+
def generate_llm(prompt, model="openai-large", max_tokens=100):
|
| 22 |
system_prompt = pre_process()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
try:
|
| 25 |
+
# Use OpenAI's ChatCompletion API
|
| 26 |
+
randomSeed = random.randint(0, 9999999)
|
| 27 |
+
response = client.chat.completions.create(
|
| 28 |
+
model=model,
|
| 29 |
+
messages=[
|
| 30 |
+
{"role": "system", "content": system_prompt},
|
| 31 |
+
{"role": "user", "content": prompt}
|
| 32 |
+
],
|
| 33 |
+
max_tokens=max_tokens,
|
| 34 |
+
seed=randomSeed
|
| 35 |
+
)
|
| 36 |
+
# Return the generated text
|
| 37 |
+
return response.choices[0].message.content.strip()
|
| 38 |
except Exception as e:
|
| 39 |
return f"Error: {str(e)}"
|
| 40 |
|
| 41 |
# Example usage (can be removed or commented out in production):
|
| 42 |
if __name__ == "__main__":
|
| 43 |
+
sample_prompt = f"Generate a unique trivia Q&A in JSON: {{\"question\":\"...\",\"answer\":\"...\"}}"
|
| 44 |
+
print("Response:", generate_llm(sample_prompt))
|