mirror of
https://github.com/Kakune55/PyGetGPT.git
synced 2025-09-15 03:39:31 +08:00
feat: 完成简易的前端和后端 API
This commit is contained in:
32
model/glm-4-flash.py
Normal file
32
model/glm-4-flash.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from zhipuai import ZhipuAI
|
||||
from model.util import InputData, OutputData, getModelAPIKey
|
||||
|
||||
def predict(input_data: InputData) -> OutputData:
    """Send the user's message to the GLM-4-Flash model and wrap the reply.

    Args:
        input_data: Request payload; ``input_data.message`` holds the raw
            user prompt text.

    Returns:
        OutputData carrying the model reply and an HTTP-like status code:
        200 on a normal completion, 201 when the reply was truncated by the
        token limit, 500 on a network error or any unknown finish reason
        (token usage is reported as 0 in the error cases).
    """
    client = ZhipuAI(api_key=getModelAPIKey("glm-4-flash"))
    response = client.chat.completions.create(
        model="glm-4-flash",  # model code required by the Zhipu API
        messages=[{"role": "user", "content": input_data.message}],
    )
    # Hoist the repeated response.choices[0] lookup.
    choice = response.choices[0]
    if choice.finish_reason == "stop":
        return OutputData(choice.message.content, 200, response.usage.total_tokens)
    if choice.finish_reason == "length":
        # Reply was cut off by the max-token limit; 201 signals truncation.
        return OutputData(choice.message.content, 201, response.usage.total_tokens)
    if choice.finish_reason == "network_error":
        return OutputData("Server Network Error", 500, 0)
    return OutputData("Unknown Error", 500, 0)
57
model/util.py
Normal file
57
model/util.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import importlib
|
||||
from configUtil import ConfigUtil
|
||||
|
||||
|
||||
class InputData:
    """Request payload for a model call: the user's message text."""

    def __init__(self, message):
        # message: the raw user prompt forwarded to the model backend.
        self.message = message

    def __repr__(self):
        return f"{type(self).__name__}(message={self.message!r})"
class OutputData:
    """Response payload from a model call."""

    def __init__(self, message, code, tokenUsed):
        # message: model reply text, or an error description on failure.
        self.message = message
        # code: HTTP-like status (200 ok, 201 truncated, 404/500 errors).
        self.code = code
        # tokenUsed: total tokens consumed by the request (0 on error).
        self.tokenUsed = tokenUsed

    def __repr__(self):
        return (
            f"{type(self).__name__}(message={self.message!r}, "
            f"code={self.code!r}, tokenUsed={self.tokenUsed!r})"
        )
def getModels() -> list:
    """Return the section ids of all models enabled in data/models.ini.

    A model is included when its ``enabled`` flag is truthy (assumes
    ConfigUtil.getBool returns a bool — TODO confirm against configUtil).
    """
    model_config = ConfigUtil("data/models.ini")
    return [
        model
        for model in model_config.getSectionList()
        if model_config.getBool(model, "enabled")
    ]
def getModelsInfo() -> list:
    """Return display info for every enabled model.

    Each entry is ``{"name": <display name>, "id": <section id>}``, built
    from data/models.ini; disabled models are skipped (same filter as
    getModels).
    """
    model_config = ConfigUtil("data/models.ini")
    return [
        {"name": model_config.get(model, "name"), "id": model}
        for model in model_config.getSectionList()
        if model_config.getBool(model, "enabled")
    ]
def getModelAPIKey(model: str) -> str:
    """Look up the API key for *model* in data/models.ini.

    Returns the key string, or the sentinel text
    "Model API key not found" when the lookup fails. The sentinel return
    (rather than an exception) is kept for backward compatibility with
    existing callers.
    """
    model_config = ConfigUtil("data/models.ini")
    try:
        return model_config.get(model, "key")
    except Exception:
        # Narrowed from a bare ``except:``, which would also swallow
        # SystemExit/KeyboardInterrupt.
        return "Model API key not found"
def requestModel(model: str, input_data: InputData) -> dict:
    """Dispatch *input_data* to the module implementing *model*.

    Unknown model ids yield a 404 payload; otherwise ``model/<id>.py`` is
    imported dynamically and its ``predict`` is invoked.

    Returns:
        The OutputData result flattened to a plain dict
        (``{"message", "code", "tokenUsed"}``), suitable for JSON
        serialization by the web layer. (Annotation fixed: the original
        declared ``-> OutputData`` but always returned a dict.)
    """
    if model not in getModels():
        result = OutputData("Model not found", 404, 0)
    else:
        # Each model lives in model/<id>.py and must expose predict().
        module = importlib.import_module(f"model.{model}")
        result = module.predict(input_data)
    # dict(vars(...)) replaces the manual attribute-copy loop.
    return dict(vars(result))
Reference in New Issue
Block a user