蒲源 committed · 8e57b83 · 1 Parent: ca8843c

feature(pu): add deepseek support and set it as default llm

Files changed:
- app_mqa_database.py (+2 -2)
- rag_demo.py (+28 -3)
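DeepSeek exposes an OpenAI-compatible endpoint, which is why the diffs below only need the stock openai client pointed at a different base_url. A minimal standalone sketch of that pattern, assuming openai>=1.0 and the key kept in an environment variable rather than hardcoded:

    import os
    from openai import OpenAI

    # Point the standard OpenAI client at DeepSeek's OpenAI-compatible endpoint.
    client = OpenAI(
        api_key=os.getenv("DEEPSEEK_OPENAI_API_KEY"),
        base_url="https://api.deepseek.com",
    )

    response = client.chat.completions.create(
        model="deepseek-chat",  # "deepseek-coder" is the code-oriented variant
        messages=[{"role": "user", "content": "Say hello"}],
        temperature=0.01,
    )
    print(response.choices[0].message.content)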
app_mqa_database.py
CHANGED

@@ -169,10 +169,10 @@ def rag_answer(question, k=5, user_id='user'):
         answer = best_answer
     else:
         retriever = get_retriever(vectorstore, k)
-        rag_chain = setup_rag_chain(model_name='kimi', temperature=temperature)
+        rag_chain = setup_rag_chain(model_name='deepseek', temperature=temperature)
         history_str = "\n".join([f"{role}: {text}" for role, text in conversation_history[user_id]])
         history_question = [history_str, question]
-        retrieved_documents, answer = execute_query(retriever, rag_chain, history_question, model_name='kimi',
+        retrieved_documents, answer = execute_query(retriever, rag_chain, history_question, model_name='deepseek',
                                                     temperature=temperature)

     # Get the total number of conversation records
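setup_rag_chain and execute_query are not shown in this commit, so how they consume model_name='deepseek' is not visible here. A hypothetical sketch of the dispatch they presumably perform, with all names inferred from the call sites above rather than from the actual source:

    import os
    from openai import OpenAI

    # Hypothetical dispatch sketch (not part of this commit): route 'deepseek'
    # to the OpenAI-compatible client, mirroring the rag_demo.py branch below.
    def make_llm_client(model_name='deepseek'):
        if model_name == 'deepseek':
            return OpenAI(api_key=os.getenv("DEEPSEEK_OPENAI_API_KEY"),
                          base_url="https://api.deepseek.com")
        raise ValueError(f"Unsupported model: {model_name}")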
rag_demo.py
CHANGED

@@ -18,7 +18,7 @@ from langchain.vectorstores import Weaviate
 from weaviate import Client
 from weaviate.embedded import EmbeddedOptions
 from zhipuai import ZhipuAI
-from openai import AzureOpenAI
+from openai import AzureOpenAI, OpenAI

 # Environment setup and document download
 load_dotenv()  # load environment variables

@@ -27,6 +27,7 @@ MIMIMAX_API_KEY = os.getenv("MIMIMAX_API_KEY")
 MIMIMAX_GROUP_ID = os.getenv("MIMIMAX_GROUP_ID")
 ZHIPUAI_API_KEY = os.getenv("ZHIPUAI_API_KEY")
 KIMI_OPENAI_API_KEY = os.getenv("KIMI_OPENAI_API_KEY")
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_OPENAI_API_KEY")

 AZURE_OPENAI_KEY = os.getenv("AZURE_OPENAI_KEY")
 AZURE_ENDPOINT = os.getenv("AZURE_ENDPOINT")
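Note the naming mismatch the new line introduces: the Python variable is DEEPSEEK_API_KEY, but it reads the environment key DEEPSEEK_OPENAI_API_KEY. An illustrative .env entry (the key name must match the os.getenv() call above; the value is a placeholder):

    # .env
    DEEPSEEK_OPENAI_API_KEY=sk-your-deepseek-key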
@@ -203,7 +204,6 @@ def execute_query_no_rag(model_name="gpt-4", temperature=0, query=""):
         return response.choices[0].message.content
     elif model_name == 'kimi':
         # For the 'kimi' model, use its dedicated API call
-        from openai import OpenAI
         client = OpenAI(
             api_key=KIMI_OPENAI_API_KEY,
             base_url="https://api.moonshot.cn/v1",

@@ -226,6 +226,29 @@ def execute_query_no_rag(model_name="gpt-4", temperature=0, query=""):
             stream=False  # disable streaming
         )
         return completion.choices[0].message.content
+    elif model_name == 'deepseek':
+        # For the 'deepseek' model, use its dedicated API call
+        client = OpenAI(
+            api_key=DEEPSEEK_API_KEY,
+            base_url="https://api.deepseek.com"
+        )
+
+        response = client.chat.completions.create(
+            model="deepseek-chat",  # or "deepseek-coder"
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant"},
+                {"role": "user", "content": query},
+            ],
+            # max_tokens=4096,
+            # max_tokens=32000,
+            temperature=temperature,
+            stream=False,
+            frequency_penalty=0,
+            presence_penalty=0,
+            top_p=1,
+            logprobs=False,
+        )
+        return response.choices[0].message.content
     else:
         # Raise an exception for unsupported models
         raise ValueError(f"Unsupported model: {model_name}")

@@ -236,7 +259,9 @@ if __name__ == "__main__":
     file_path = './documents/LightZero_README_zh.md'
     # model_name = "glm-4"  # model_name=['abab6-chat', 'glm-4', 'gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo', 'azure_gpt-4', 'azure_gpt-35-turbo-16k', 'azure_gpt-35-turbo']
     # model_name = 'azure_gpt-4'
-    model_name = 'kimi'
+    # model_name = 'kimi'
+    model_name = 'deepseek'
+
     temperature = 0.01
     embedding_model = 'OpenAI'  # embedding_model=['HuggingFace', 'TensorflowHub', 'OpenAI']
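With the new branch in place, a quick smoke test of the default model, assuming rag_demo.py is importable and DEEPSEEK_OPENAI_API_KEY is set:

    from rag_demo import execute_query_no_rag

    # Exercise the new 'deepseek' branch directly, without retrieval.
    answer = execute_query_no_rag(model_name='deepseek', temperature=0.01,
                                  query='Summarize LightZero in one sentence.')
    print(answer)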