Add support for the GPT4.0-Turbo model

Kakune55 2023-12-09 15:06:00 +08:00
parent 50445d8af3
commit 8d9bd619c4
3 changed files with 36 additions and 1 deletion

src/Server/gpt4Turbo.py (new file, 28 lines added)

@@ -0,0 +1,28 @@
import openai, config

# The API key and endpoint are read from the gpt3.5turbo section of the config file.
openai.api_key = config.readConf()["gpt3.5turbo"]["Authorization"]
openai.base_url = config.readConf()["gpt3.5turbo"]["url"]

def service(prompt, history=""):
    if history == "":
        # Single-turn request: only the current prompt is sent.
        response = openai.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "user", "content": prompt},
            ]
        )
    else:
        # Multi-turn request: replay the two previous turns before the current prompt.
        response = openai.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "user", "content": history[1]["user"]},
                {"role": "assistant", "content": history[1]["bot"]},
                {"role": "user", "content": history[0]["user"]},
                {"role": "assistant", "content": history[0]["bot"]},
                {"role": "user", "content": prompt},
            ]
        )
    if response.choices[0].finish_reason == "stop":
        return 200, response.choices[0].message.content, int(response.usage.total_tokens * 45)  # 45x token consumption
    else:
        return 50, "API Error!", 0
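For reference, a minimal usage sketch of the new module follows. The call pattern is taken from the diff above; the history layout (index 0 holding the most recent turn, each turn a dict with "user" and "bot" keys) is inferred from how service() reads history[1] before history[0], and the example prompts are placeholders.

import gpt4Turbo

# Single-turn call: history defaults to "" and only the prompt is sent.
code, output, token_used = gpt4Turbo.service("Explain HTTP status code 418.")

# Two-turn call: newest turn first, matching how service() indexes history (assumed).
history = [
    {"user": "And in Python?", "bot": "Use the requests library."},             # most recent turn
    {"user": "How do I send an HTTP request?", "bot": "Use any HTTP client."},  # older turn
]
code, output, token_used = gpt4Turbo.service("Show a short example.", history)

if code == 200:
    print(output, f"(billed tokens: {token_used})")
else:
    print("request failed:", output)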

Server entry module (modified)

@@ -1,6 +1,6 @@
import flask, requests, json
from flask_cors import CORS
-import db, qwenTurbo, chatglmTurbo, gpt35Turbo
+import db, qwenTurbo, chatglmTurbo, gpt35Turbo, gpt4Turbo
@@ -42,6 +42,12 @@ def post_data():
        elif userRequest["context"] == 0:
            code, output, tokenUsed = gpt35Turbo.service(userRequest['prompt'])
+    if userRequest["model"] == "gpt4.0-turbo":  # route the request to gpt4.0-turbo
+        if userRequest["context"] == 1:  # include the previous turns as context
+            code, output, tokenUsed = gpt4Turbo.service(userRequest['prompt'], userRequest['history'])
+        elif userRequest["context"] == 0:  # single prompt, no context
+            code, output, tokenUsed = gpt4Turbo.service(userRequest['prompt'])
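A hedged sketch of a request that would reach this new branch is shown below. The field names model, context, prompt and history come from the handler above; the host, port and route are assumptions, as is the way post_data() parses the JSON body, and any extra fields (such as a user key) checked elsewhere in the handler are omitted.

import requests

# Hypothetical endpoint; the real host and route are not shown in this diff.
URL = "http://127.0.0.1:5000/"

payload = {
    "model": "gpt4.0-turbo",
    "context": 1,  # 1 = send the two previous turns, 0 = prompt only
    "prompt": "Summarise the previous answer in one sentence.",
    "history": [
        {"user": "Is Flask async?", "bot": "Not natively; it is WSGI-based."},  # most recent turn
        {"user": "What is Flask?", "bot": "A Python micro web framework."},     # older turn
    ],
}

resp = requests.post(URL, json=payload)
print(resp.status_code, resp.text)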

Web front-end page (modified)

@@ -111,6 +111,7 @@
<option value="qwen-turbo">qwen-turbo</option>
<option value="chatglm-turbo">chatglmTurbo</option>
<option value="gpt3.5-turbo">gpt3.5-turbo(X3 Token)</option>
+<option value="gpt4.0-turbo">gpt4.0-turbo(X45 Token)</option>
</select>
<hr>
<h3>Current UserKey</h3>
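The "(X45 Token)" label mirrors the 45x multiplier applied to response.usage.total_tokens in gpt4Turbo.py, just as "(X3 Token)" corresponds to the gpt3.5-turbo module. A small sketch of that billing arithmetic follows; the dict-based lookup is an illustration, not the project's actual accounting code.

# Billing sketch: the token count reported by the API is scaled per model before
# being charged. The 3x and 45x factors come from the option labels above; models
# without a listed multiplier are assumed here to be charged at face value.
BILLING_MULTIPLIER = {
    "gpt3.5-turbo": 3,
    "gpt4.0-turbo": 45,
}

def billed_tokens(model: str, total_tokens: int) -> int:
    return int(total_tokens * BILLING_MULTIPLIER.get(model, 1))

print(billed_tokens("gpt4.0-turbo", 820))  # -> 36900 tokens charged for one call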