Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

适配新的revtongyi,添加天工ai支持,进行一些细节修改适配maestro项目 #111

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,8 @@ dmypy.json
# pytype static type analyzer
.pytype/

.idea/

# Cython debug symbols
cython_debug/

Expand Down
2 changes: 1 addition & 1 deletion free_one_api/entities/channel.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def count_tokens(
num_tokens = 0
for message in messages:
for key, value in message.items():
num_tokens += len(encoding.encode(value))
num_tokens += len(encoding.encode(str(value)))
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens

Expand Down
117 changes: 117 additions & 0 deletions free_one_api/impls/adapter/kimi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
import asyncio
import typing
import traceback
import uuid
import random

import revkimi.kimichat as kimi

from free_one_api.entities import request, response

from ...models import adapter
from ...models.adapter import llm
from ...entities import request, response, exceptions
from ...models.channel import evaluation


@adapter.llm_adapter
class KimiAdapter(llm.LLMLibAdapter):
    """Adapter backed by the revkimi reverse-engineered Kimi chat client.

    Flattens an OpenAI-style message list into a single prompt, sends it
    to Kimi via a cookie-authenticated `kimi.Chatbot`, and yields one
    non-streamed reply.
    """

    @classmethod
    def name(cls) -> str:
        return "DrTang/revKimi"

    @classmethod
    def description(cls) -> str:
        # NOTE(review): replaced the original placeholder text with a
        # proper description matching the other adapters' style.
        return "Use DrTang/revKimi to access the Kimi chat service."

    def supported_models(self) -> list[str]:
        # Model names this channel accepts; duplicates removed
        # ("gpt-4" and "gpt-3.5-turbo" were listed twice).
        return [
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-1106",
            "gpt-3.5-turbo-16k",
            "gpt-3.5-turbo-0301",
            "gpt-3.5-turbo-0613",
            "gpt-3.5-turbo-16k-0613",
            "gpt-4",
            "gpt-4-1106-preview",
            "gpt-4-vision-preview",
            "gpt-4-0314",
            "gpt-4-0613",
            "gpt-4-32k",
            "gpt-4-32k-0314",
            "gpt-4-32k-0613",
        ]

    def function_call_supported(self) -> bool:
        return False

    def stream_mode_supported(self) -> bool:
        return True

    def multi_round_supported(self) -> bool:
        return True

    @classmethod
    def config_comment(cls) -> str:
        return \
"""
You should provide cookie string as `cookie` in config:
{
    "cookie": "your cookie string"
}

"""

    @classmethod
    def supported_path(cls) -> str:
        return "/v1/chat/completions"

    # Underlying revkimi client, created in __init__ from the configured cookie.
    chatbot: kimi.Chatbot

    def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
        self.config = config
        self.eval = eval
        self.chatbot = kimi.Chatbot(
            cookies_str=config['cookie']
        )

    async def test(self) -> typing.Union[bool, str]:
        """Probe the channel with a trivial prompt.

        Returns (True, "") when the backend answers, otherwise
        (False, error message).
        """
        try:
            self.chatbot.ask(
                prompt="Hello, reply 'hi' only.",
                conversation_id="",   # empty -> a new conversation is created
                timeout=10,           # seconds
                use_search=False,     # no web search for the probe
            )

            return True, ""
        except Exception as e:
            traceback.print_exc()
            return False, str(e)

    async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
        """Flatten `req.messages` into one prompt and yield Kimi's reply."""
        prompt = ""

        for msg in req.messages:
            prompt += f"{msg['role']}: {msg['content']}\n"

        prompt += "assistant: "

        # Random id for the response object, mirroring the other adapters.
        random_int = random.randint(0, 1000000000)

        resp = self.chatbot.ask(
            prompt=prompt,
            conversation_id="",   # empty -> a new conversation is created
            timeout=10,           # seconds
            use_search=True,      # allow Kimi's web search
        )

        # This adapter produces a single, complete reply — mark it STOP so
        # downstream consumers receive a proper finish signal (the original
        # yielded FinishReason.NULL and never signalled completion).
        yield response.Response(
            id=random_int,
            finish_reason=response.FinishReason.STOP,
            normal_message=resp['text'],
            function_call=None
        )
85 changes: 49 additions & 36 deletions free_one_api/impls/adapter/qianwen.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,33 @@

@adapter.llm_adapter
class QianWenAdapter(llm.LLMLibAdapter):

@classmethod
def name(cls) -> str:
return "xw5xr6/revTongYi"

@classmethod
def description(self) -> str:
return "Use leeeduke/revTongYi to access Aliyun TongYi QianWen."

def supported_models(self) -> list[str]:
return [
"gpt-3.5-turbo",
"gpt-4"
"gpt-4",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
]

def function_call_supported(self) -> bool:
Expand All @@ -38,84 +52,83 @@ def stream_mode_supported(self) -> bool:

def multi_round_supported(self) -> bool:
return True

@classmethod
def config_comment(cls) -> str:
return \
"""RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/
You should provide cookie string as `cookie` in config:
{
"cookie": "your cookie string"
}

Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi
"""
"""RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/
You should provide cookie string as `cookie` in config:
{
"cookie": "your cookie string"
}
Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi
"""

@classmethod
def supported_path(cls) -> str:
return "/v1/chat/completions"

chatbot: qwen.Chatbot

def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
self.config = config
self.eval = eval
self.chatbot = qwen.Chatbot(
cookies_str=config['cookie']
)

async def test(self) -> typing.Union[bool, str]:
try:
# self.chatbot.create_session("Hello, reply 'hi' only.")
self.chatbot.sessionId = ""
resp = self.chatbot.ask(
"Hello, reply 'hi' only.",
sessionId=""
"Hello, reply 'hi' only."
)

self.chatbot.delete_session(resp.sessionId)
print(resp)
self.chatbot.delete_session(resp['sessionId'])

return True, ""
except Exception as e:
traceback.print_exc()
return False, str(e)

async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
prompt = ""

for msg in req.messages:
prompt += f"{msg['role']}: {msg['content']}\n"

prompt += "assistant: "

random_int = random.randint(0, 1000000000)

prev_text = ""

sessionId = ""

self.chatbot.sessionId = ""

for resp in self.chatbot.ask(
prompt=prompt,
sessionId="",
stream=True,
prompt=prompt,
# sessionId="",
stream=True,
):
if resp.contents == None or len(resp.contents) == 0:
if resp['contents'] == None or len(resp['contents']) == 0:
continue

sessionId = resp.sessionId
sessionId = resp['sessionId']

yield response.Response(
id=random_int,
finish_reason=response.FinishReason.NULL,
normal_message=resp.contents[0].content.replace(prev_text, ""),
normal_message=resp['contents'][0]['content'].replace(prev_text, ""),
function_call=None
)
prev_text = resp.contents[0].content
prev_text = resp['contents'][0]['content']

self.chatbot.delete_session(sessionId)

yield response.Response(
id=random_int,
finish_reason=response.FinishReason.STOP,
Expand Down
12 changes: 6 additions & 6 deletions free_one_api/impls/adapter/re_gpt.py
dd123-a marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@

@adapter.llm_adapter
class ReGPTAdapter(llm.LLMLibAdapter):

@classmethod
def name(cls) -> str:
return "Zai-Kun/reverse-engineered-chatgpt"

@classmethod
def description(self) -> str:
return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition."

def supported_models(self) -> list[str]:
return [
"gpt-3.5-turbo",
Expand All @@ -32,13 +32,13 @@ def supported_models(self) -> list[str]:

def function_call_supported(self) -> bool:
return False

def stream_mode_supported(self) -> bool:
return True

def multi_round_supported(self) -> bool:
return True

@classmethod
def config_comment(cls) -> str:
return \
Expand Down Expand Up @@ -87,10 +87,10 @@ async def query(
req: request.Request
) -> typing.AsyncGenerator[response.Response, None]:
prompt = ""

for msg in req.messages:
prompt += f"{msg['role']}: {msg['content']}\n"

prompt += "assistant: "

random_int = random.randint(0, 1000000)
Expand Down
Loading