mirror of
https://github.com/Kakune55/PyGetGPT.git
synced 2025-06-29 00:08:04 +08:00
添加chatglmTurbo模型
This commit is contained in:
parent
3c94b93196
commit
caccd39395
27
src/Server/chatglmTurbo.py
Normal file
27
src/Server/chatglmTurbo.py
Normal file
@ -0,0 +1,27 @@
|
||||
# Client configuration for the ZhipuAI chatglm_turbo backend.
import zhipuai

# SECURITY NOTE(review): a live API key is hard-coded and committed to source
# control — anyone with repo access can bill against this account.  Rotate
# the key and load it from an environment variable or config file instead.
zhipuai.api_key = "83f977afedc7f414dea579a2551298c2.sydItr1sZT8UPXWb"
def service(prompt, history=""):
    """Send *prompt* to the ZhipuAI ``chatglm_turbo`` model and return the reply.

    Parameters
    ----------
    prompt : str
        The user's current message.
    history : str or sequence, optional
        ``""`` (the default) requests a context-free completion.  Otherwise
        it must be indexable with at least two entries, each a dict with
        ``"user"`` and ``"bot"`` keys, newest exchange first
        (``history[0]`` is the most recent) — this matches how the caller
        in the server passes it.

    Returns
    -------
    tuple[int, str, int]
        ``(200, reply_text, total_tokens)`` on success, or
        ``(50, "<code><message>", 0)`` when the API reports an error.
    """
    # Build the chat transcript once instead of duplicating the invoke()
    # call in both branches.  With context enabled, replay the two most
    # recent exchanges in chronological order before the new prompt.
    messages = []
    if history != "":
        messages += [
            {"role": "user", "content": history[1]["user"]},
            {"role": "assistant", "content": history[1]["bot"]},
            {"role": "user", "content": history[0]["user"]},
            {"role": "assistant", "content": history[0]["bot"]},
        ]
    messages.append({"role": "user", "content": prompt})

    response = zhipuai.model_api.invoke(
        model="chatglm_turbo",
        prompt=messages,
    )

    if response["code"] == 200:
        return (
            200,
            response["data"]["choices"][0]["content"],
            response["data"]["usage"]["total_tokens"],
        )
    # Bug fix: response["code"] is an int, so the original
    # `response["code"] + response["msg"]` raised TypeError on every error
    # path instead of reporting the error.  Stringify the code first.
    return 50, str(response["code"]) + response["msg"], 0
|
@ -1,6 +1,6 @@
|
||||
import flask , requests , json
|
||||
from flask_cors import CORS
|
||||
import db , qwenTurbo
|
||||
import db , qwenTurbo ,chatglmTurbo
|
||||
|
||||
|
||||
|
||||
@ -24,11 +24,21 @@ def post_data():
|
||||
elif surplusToken <= 0:
|
||||
return {"code":40,"output":"Token has been use up"}
|
||||
|
||||
if userRequest["model"] == "qwen-turbo":
|
||||
if userRequest["model"] == "qwen-turbo": # 调用qwen-Turbo
|
||||
if userRequest["context"] == 1: # 是否使用上文关联
|
||||
code , output , tokenUsed = qwenTurbo.service(userRequest['userkey'],userRequest['prompt'],userRequest['history'])
|
||||
code , output , tokenUsed = qwenTurbo.service(userRequest['prompt'],userRequest['history'])
|
||||
elif userRequest["context"] == 0:
|
||||
code , output , tokenUsed = qwenTurbo.service(userRequest['userkey'],userRequest['prompt'])
|
||||
code , output , tokenUsed = qwenTurbo.service(userRequest['prompt'])
|
||||
|
||||
if userRequest["model"] == "chatglm-turbo": # 调用chatglm-turbo
|
||||
if userRequest["context"] == 1: # 是否使用上文关联
|
||||
code , output , tokenUsed = chatglmTurbo.service(userRequest['prompt'],userRequest['history'])
|
||||
elif userRequest["context"] == 0:
|
||||
code , output , tokenUsed = chatglmTurbo.service(userRequest['prompt'])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
db.reduce_value(userRequest['userkey'], tokenUsed)
|
||||
return {"code":code,"output":output,"surplus":surplusToken}
|
||||
|
@ -8,7 +8,7 @@ header = {
|
||||
"Authorization":"Bearer sk-69129a5d7fc6468a9f6f30d6935254c6"
|
||||
}
|
||||
|
||||
def service(userkey,prompt,history = ""):
|
||||
def service(prompt,history = ""):
|
||||
# 设置请求数据
|
||||
if history == "":
|
||||
data = {
|
||||
|
@ -108,6 +108,7 @@
|
||||
<p>使用的AI模型</p>
|
||||
<select id="setUpDropdown" defaultValue="qwen-turbo">
|
||||
<option value="qwen-turbo">qwen-turbo</option>
|
||||
<option value="chatglm-turbo">chatglmTurbo</option>
|
||||
</select>
|
||||
<hr>
|
||||
<h3>当前UserKey</h3>
|
||||
@ -261,7 +262,7 @@
|
||||
.then(data => {
|
||||
// 在这里处理返回的JSON数据
|
||||
if (data["code"] == 200) {
|
||||
appendMessage("KakuAI",data["output"]);
|
||||
appendMessage("KakuAI"+"("+document.getElementById("setUpDropdown").value+")",data["output"]);
|
||||
document.getElementById("showtoken").innerHTML = "<br>剩余Tokens:" + data["surplus"];
|
||||
}
|
||||
else{
|
||||
|
Loading…
x
Reference in New Issue
Block a user