import zhipuai

# NOTE(review): hard-coded credential committed to source control — move this
# to an environment variable / secret store and rotate the key.
zhipuai.api_key = "83f977afedc7f414dea579a2551298c2.sydItr1sZT8UPXWb"


def service(prompt, history=""):
    """Call the ZhipuAI ``chatglm_turbo`` model and return its reply.

    Args:
        prompt: the user's current message text.
        history: ``""`` for a stateless, single-turn call; otherwise a
            sequence of prior turns ordered newest-first, each a dict with
            ``"user"`` and ``"bot"`` keys (matches the qwenTurbo convention
            used elsewhere in this server).

    Returns:
        A ``(code, output, tokens)`` tuple:
        ``(200, answer_text, total_tokens)`` on success, or
        ``(50, error_text, 0)`` when the API reports a non-200 code.
    """
    messages = []
    if history != "":
        # Replay prior turns oldest-first. Generalized from the original
        # hard-coded history[1]/history[0] pair: produces the identical
        # message list for a two-turn history, but also works for any
        # other length instead of raising IndexError.
        for turn in reversed(history):
            messages.append({"role": "user", "content": turn["user"]})
            messages.append({"role": "assistant", "content": turn["bot"]})
    messages.append({"role": "user", "content": prompt})

    response = zhipuai.model_api.invoke(
        model="chatglm_turbo",
        prompt=messages,
    )

    if response["code"] == 200:
        data = response["data"]
        return 200, data["choices"][0]["content"], data["usage"]["total_tokens"]
    # BUG FIX: response["code"] is an int (compared to 200 above); the
    # original ``response["code"] + response["msg"]`` raised TypeError on
    # every error path. Convert to str before concatenating.
    return 50, str(response["code"]) + response["msg"], 0
userRequest["context"] == 1: # 是否使用上文关联 + code , output , tokenUsed = chatglmTurbo.service(userRequest['prompt'],userRequest['history']) + elif userRequest["context"] == 0: + code , output , tokenUsed = chatglmTurbo.service(userRequest['prompt']) + + + + db.reduce_value(userRequest['userkey'], tokenUsed) return {"code":code,"output":output,"surplus":surplusToken} diff --git a/src/Server/qwenTurbo.py b/src/Server/qwenTurbo.py index 6bf1250..77d76fb 100644 --- a/src/Server/qwenTurbo.py +++ b/src/Server/qwenTurbo.py @@ -8,7 +8,7 @@ header = { "Authorization":"Bearer sk-69129a5d7fc6468a9f6f30d6935254c6" } -def service(userkey,prompt,history = ""): +def service(prompt,history = ""): # 设置请求数据 if history == "": data = { diff --git a/src/Web/index.html b/src/Web/index.html index 6627cba..4995769 100644 --- a/src/Web/index.html +++ b/src/Web/index.html @@ -108,6 +108,7 @@

使用的AI模型


当前UserKey

@@ -261,7 +262,7 @@ .then(data => { // 在这里处理返回的JSON数据 if (data["code"] == 200) { - appendMessage("KakuAI",data["output"]); + appendMessage("KakuAI"+"("+document.getElementById("setUpDropdown").value+")",data["output"]); document.getElementById("showtoken").innerHTML = "
剩余Tokens:" + data["surplus"]; } else{