Commit 2c69450
1 Parent(s): 00628ec
Update app.py

app.py CHANGED
@@ -25,6 +25,7 @@ from g4f.Provider import (
     BingHuan,
     Wewordle,
     ChatgptAi,
+    opchatgpts
 )
 import os
 import json

@@ -47,7 +48,6 @@ from langchain.utilities import WikipediaAPIWrapper
 from langchain.tools import DuckDuckGoSearchRun
 from models_for_langchain.memory_func import validate_memory_len
 
-
 provider_dict = {
     'Ails': Ails,
     'You': You,

@@ -72,15 +72,13 @@ provider_dict = {
     'BingHuan': BingHuan,
     'Wewordle': Wewordle,
     'ChatgptAi': ChatgptAi,
+    'opchatgpts': opchatgpts
 }
 
-
-
-
-
-    'falcon-13b':['H2o'],
-    'llama-13b':['H2o']
-}
+
+with open("available_dict.txt", "r") as fp:
+    # Load the dictionary from the file
+    available_dict = json.load(fp)
 
 def change_prompt_set(prompt_set_name):
     return gr.Dropdown.update(choices=list(prompt_set_list[prompt_set_name].keys()))

@@ -141,8 +139,13 @@ def bot(history, model_name, provider_name, system_msg, agent):
     prompt = new_template.format(
         chat_history = prev_memory,
     )
-
-
+    print(f'prompt = \n --------\n{prompt}\n --------')
+    for _ in range(3):
+        try:
+            bot_msg = llm._call(prompt=prompt)
+            break
+        except:
+            bot_msg = '服务器无响应,请更换提供者或者清空对话后重试。'
 
     for c in bot_msg:
         history[-1][1] += c

@@ -187,8 +190,8 @@ with gr.Blocks() as demo:
     memory = ConversationBufferWindowMemory(k=6, memory_key="chat_history")
     with gr.Row():
         model_name = gr.Dropdown(list(available_dict.keys()), value='gpt-3.5-turbo', label='模型')
-        provider = gr.Dropdown(available_dict['gpt-3.5-turbo'], value='
-        agent = gr.Dropdown(['系统提示', '维基百科'
+        provider = gr.Dropdown(available_dict['gpt-3.5-turbo'], value='GetGpt', label='提供者', min_width=20)
+        agent = gr.Dropdown(['系统提示', '维基百科'], value='系统提示', label='Agent')
         system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
 
     chatbot = gr.Chatbot([[None, None]], label='AI')
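The @@ -72,15 +72,13 @@ hunk replaces the hard-coded model-to-provider table with a JSON file read at import time. The actual contents of available_dict.txt are not part of this commit; the snippet below is only a sketch of the expected shape, assuming the entries visible elsewhere in the diff ('gpt-3.5-turbo' with default provider 'GetGpt', plus the removed 'falcon-13b': ['H2o'] and 'llama-13b': ['H2o'] lines).

import json

# Hypothetical example of available_dict.txt: model name -> list of g4f
# provider names. Only entries visible elsewhere in this diff are shown;
# the real file may contain more models and providers.
example_available_dict = {
    "gpt-3.5-turbo": ["GetGpt"],
    "falcon-13b": ["H2o"],
    "llama-13b": ["H2o"],
}

with open("available_dict.txt", "w") as fp:
    json.dump(example_available_dict, fp, indent=4)

# app.py then loads it back exactly as in the hunk above:
with open("available_dict.txt", "r") as fp:
    available_dict = json.load(fp)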
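The new provider dropdown is initialised from available_dict['gpt-3.5-turbo'], so its choices presumably need refreshing whenever the selected model changes. A minimal sketch of how that could be wired with the same gr.Dropdown.update pattern already used by change_prompt_set; the change_model handler and its .change() wiring are assumptions, not part of this diff.

def change_model(model_name):
    # Assumed helper: refresh provider choices for the newly selected model,
    # defaulting to the first provider listed for it in available_dict.
    providers = available_dict[model_name]
    return gr.Dropdown.update(choices=providers, value=providers[0])

# Hypothetical wiring inside the gr.Blocks() context:
# model_name.change(change_model, inputs=model_name, outputs=provider)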