Change project folder structure

2023-12-10 16:31:50 +08:00
parent 8d9bd619c4
commit e00a704a56
24 changed files with 12 additions and 14 deletions

Binary files not shown (4 files).

27 apiModule/chatglmTurbo.py Normal file

@@ -0,0 +1,27 @@
import zhipuai, config

# Read the chatglm_turbo API key from the project config
zhipuai.api_key = config.readConf()["chatglmturbo"]["Authorization"]

def service(prompt, history=""):
    if history == "":
        # Single-turn request: only the current user prompt
        response = zhipuai.model_api.invoke(
            model="chatglm_turbo",
            prompt=[
                {"role": "user", "content": prompt},
            ]
        )
    else:
        # Multi-turn request: replay the two most recent exchanges (oldest first), then the new prompt
        response = zhipuai.model_api.invoke(
            model="chatglm_turbo",
            prompt=[
                {"role": "user", "content": history[1]["user"]},
                {"role": "assistant", "content": history[1]["bot"]},
                {"role": "user", "content": history[0]["user"]},
                {"role": "assistant", "content": history[0]["bot"]},
                {"role": "user", "content": prompt},
            ]
        )
    if response["code"] == 200:
        # The returned content arrives wrapped in quotes; split('"')[1] strips them
        return 200, str(response["data"]["choices"][0]["content"]).split('"')[1], response["data"]["usage"]["total_tokens"]
    else:
        return 50, str(response["code"]) + response["msg"], 0
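
A minimal usage sketch for the wrapper above. The history structure is not defined in this commit; from the indexing in service(), it appears to be a list of the two most recent exchanges with the newest at index 0, each entry holding "user" and "bot" strings. The module path and sample strings below are illustrative assumptions.

# Usage sketch; assumes the project root is on sys.path so `config` resolves,
# and that `history` holds the newest exchange at index 0 (inferred from service()).
from apiModule import chatglmTurbo

history = [
    {"user": "And in winter?", "bot": "Winters in Beijing are cold and dry."},        # newer exchange
    {"user": "How is the weather in Beijing?", "bot": "It varies a lot by season."},  # older exchange
]

code, text, tokens = chatglmTurbo.service("Should I pack an umbrella?", history)
if code == 200:
    print(f"{text} ({tokens} tokens)")
else:
    print("request failed:", text)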

28 apiModule/gpt35Turbo.py Normal file

@@ -0,0 +1,28 @@
import openai, config

# API key and base URL for the gpt-3.5-turbo endpoint come from the project config
openai.api_key = config.readConf()["gpt3.5turbo"]["Authorization"]
openai.base_url = config.readConf()["gpt3.5turbo"]["url"]

def service(prompt, history=""):
    if history == "":
        # Single-turn request: only the current user prompt
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": prompt},
            ]
        )
    else:
        # Multi-turn request: replay the two most recent exchanges (oldest first), then the new prompt
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": history[1]["user"]},
                {"role": "assistant", "content": history[1]["bot"]},
                {"role": "user", "content": history[0]["user"]},
                {"role": "assistant", "content": history[0]["bot"]},
                {"role": "user", "content": prompt},
            ]
        )
    if response.choices[0].finish_reason == "stop":
        return 200, response.choices[0].message.content, int(response.usage.total_tokens * 3)  # billed at 3x token cost
    else:
        return 50, "API Error!", 0

28 apiModule/gpt4Turbo.py Normal file

@@ -0,0 +1,28 @@
import openai, config

# Key and base URL are shared with the gpt3.5turbo config section
openai.api_key = config.readConf()["gpt3.5turbo"]["Authorization"]
openai.base_url = config.readConf()["gpt3.5turbo"]["url"]

def service(prompt, history=""):
    if history == "":
        # Single-turn request: only the current user prompt
        response = openai.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "user", "content": prompt},
            ]
        )
    else:
        # Multi-turn request: replay the two most recent exchanges (oldest first), then the new prompt
        response = openai.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=[
                {"role": "user", "content": history[1]["user"]},
                {"role": "assistant", "content": history[1]["bot"]},
                {"role": "user", "content": history[0]["user"]},
                {"role": "assistant", "content": history[0]["bot"]},
                {"role": "user", "content": prompt},
            ]
        )
    if response.choices[0].finish_reason == "stop":
        return 200, response.choices[0].message.content, int(response.usage.total_tokens * 45)  # billed at 45x token cost
    else:
        return 50, "API Error!", 0

31 apiModule/qwenTurbo.py Normal file

@@ -0,0 +1,31 @@
import requests, json, config

# Target URL and auth header for the qwen-turbo endpoint, read from the project config
url = config.readConf()["qwenturbo"]["url"]  # replace with your API endpoint URL
header = {
    "Content-Type": "application/json",
    "Authorization": config.readConf()["qwenturbo"]["Authorization"]
}

def service(prompt, history=""):
    # Build the request payload
    if history == "":
        data = {
            "model": "qwen-turbo",
            "input": {
                "prompt": f"{prompt}"
            }
        }
    else:
        data = {
            "model": "qwen-turbo",
            "input": {
                "prompt": f"{prompt}",
                "history": history
            }
        }
    # Send the POST request and parse the JSON response
    response = json.loads(requests.post(url, json=data, headers=header).text)
    if 'code' in response:
        # Error responses carry a code and a message
        return 50, str(response['code']) + response['message'], 0
    return 200, response['output']['text'], response["usage"]["total_tokens"]
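
Unlike the OpenAI-style wrappers, this one forwards history to the endpoint unchanged, so the caller must supply it in the format the qwen-turbo API expects (a list of user/bot pairs, as far as the passthrough here implies). A minimal usage sketch under that assumption; the sample strings are illustrative only:

# Usage sketch; assumes the config file holds a valid qwenturbo url and Authorization value.
from apiModule import qwenTurbo

history = [{"user": "Hello", "bot": "Hi, how can I help you today?"}]
code, text, tokens = qwenTurbo.service("Please summarize our chat so far.", history)
print(code, text, tokens)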