mirror of https://github.com/Kakune55/PyGetGPT.git
synced 2025-05-06 18:29:24 +08:00

Add support for the GPT-3.5-Turbo model

This commit is contained in:
parent 90403d78bb
commit 6654ea0953

src/Server/gpt35Turbo.py | 28 added (new file)

@@ -0,0 +1,28 @@
+import openai , config
+
+openai.api_key = config.readConf()["gpt3.5turbo"]["Authorization"]
+openai.base_url = config.readConf()["gpt3.5turbo"]["url"]
+
+def service(prompt, history=""):
+    if history == "":
+        response = openai.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": prompt},
+            ]
+        )
+    else:
+        response = openai.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": history[1]["user"]},
+                {"role": "assistant", "content": history[1]["bot"]},
+                {"role": "user", "content": history[0]["user"]},
+                {"role": "assistant", "content": history[0]["bot"]},
+                {"role": "user", "content": prompt},
+            ]
+        )
+    if response.choices[0].finish_reason == "stop":
+        return 200, response.choices[0].message.content, int(response.usage.total_tokens * 3)  # 3x token billing
+    else:
+        return 50, "API Error!", 0
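
Below is a minimal usage sketch for the new module, assuming config.readConf() returns a mapping whose "gpt3.5turbo" section holds "Authorization" and "url" keys (only the key names appear in this commit; the config format and the sample values are assumptions):

    # Hypothetical config entries consumed by gpt35Turbo.py (key names from the diff,
    # values are placeholders):
    #
    #   [gpt3.5turbo]
    #   Authorization = sk-xxxxxxxx
    #   url = https://api.openai.com/v1/
    import gpt35Turbo

    # Single-turn call: history defaults to "" and only the prompt is sent.
    code, output, tokens_billed = gpt35Turbo.service("Hello!")

    # Two-turn call: the module reads history[1] (older turn) before history[0]
    # (newer turn); each entry is a dict with "user" and "bot" keys.
    history = [
        {"user": "And in Fahrenheit?", "bot": "About 77 F."},      # history[0], newer
        {"user": "How warm is 25 C?", "bot": "Pleasantly warm."},  # history[1], older
    ]
    code, output, tokens_billed = gpt35Turbo.service("Summarize the conversation.", history)

    if code == 200:
        print(output, "| billed tokens:", tokens_billed)  # total_tokens * 3, as in the module
    else:
        print("Upstream error:", output)

The three-value return (status code, text, billed tokens) matches how post_data() already unpacks the qwenTurbo and chatglmTurbo services.
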
@@ -1,6 +1,6 @@
 import flask , requests , json
 from flask_cors import CORS
-import db , qwenTurbo ,chatglmTurbo
+import db , qwenTurbo , chatglmTurbo , gpt35Turbo
@@ -36,6 +36,12 @@ def post_data():
         elif userRequest["context"] == 0:
             code , output , tokenUsed = chatglmTurbo.service(userRequest['prompt'])
 
+    if userRequest["model"] == "gpt3.5-turbo":  # route to gpt3.5-turbo
+        if userRequest["context"] == 1:  # use previous context or not
+            code , output , tokenUsed = gpt35Turbo.service(userRequest['prompt'],userRequest['history'])
+        elif userRequest["context"] == 0:
+            code , output , tokenUsed = gpt35Turbo.service(userRequest['prompt'])
+
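
For orientation, a rough sketch of a client request that would take the new branch above; the "model", "context", "prompt" and "history" fields come from the diff, while the endpoint address, route, and any extra fields (such as a user key) are assumptions:

    import requests

    payload = {
        "model": "gpt3.5-turbo",   # selects the new gpt35Turbo branch
        "context": 1,              # 1 = send stored history, 0 = single turn
        "prompt": "Continue from where we left off.",
        "history": [
            {"user": "Write a haiku.", "bot": "Old pond, a frog jumps in."},  # newer turn
            {"user": "Hello!", "bot": "Hi, how can I help?"},                 # older turn
        ],
    }

    # Assumed local development address and root route; adjust to the real Flask endpoint.
    resp = requests.post("http://127.0.0.1:5000/", json=payload)
    print(resp.status_code, resp.text)
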
@@ -110,6 +110,7 @@
 <select id="setUpDropdown" defaultValue="qwen-turbo" onchange="setCookie('modelSet', document.getElementById('setUpDropdown').value, 265)">
     <option value="qwen-turbo">qwen-turbo</option>
     <option value="chatglm-turbo">chatglmTurbo</option>
+    <option value="gpt3.5-turbo">gpt3.5-turbo(X3 Token)</option>
 </select>
 <hr>
 <h3>当前UserKey</h3>