diff --git a/.gitignore b/.gitignore index 0509d67..7924214 100644 --- a/.gitignore +++ b/.gitignore @@ -149,6 +149,8 @@ dmypy.json # pytype static type analyzer .pytype/ +.idea/ + # Cython debug symbols cython_debug/ diff --git a/free_one_api/entities/channel.py b/free_one_api/entities/channel.py index a53d70e..2f46c01 100644 --- a/free_one_api/entities/channel.py +++ b/free_one_api/entities/channel.py @@ -80,7 +80,7 @@ def count_tokens( num_tokens = 0 for message in messages: for key, value in message.items(): - num_tokens += len(encoding.encode(value)) + num_tokens += len(encoding.encode(str(value))) num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> return num_tokens diff --git a/free_one_api/impls/adapter/kimi.py b/free_one_api/impls/adapter/kimi.py new file mode 100644 index 0000000..f620681 --- /dev/null +++ b/free_one_api/impls/adapter/kimi.py @@ -0,0 +1,117 @@ +import asyncio +import typing +import traceback +import uuid +import random + +import revkimi.kimichat as kimi + +from free_one_api.entities import request, response + +from ...models import adapter +from ...models.adapter import llm +from ...entities import request, response, exceptions +from ...models.channel import evaluation + + +@adapter.llm_adapter +class KimiAdapter(llm.LLMLibAdapter): + + @classmethod + def name(cls) -> str: + return "DrTang/revKimi" + + @classmethod + def description(self) -> str: + return "Use dd123-a/revkimi to access Moonshot Kimi web chat." + + def supported_models(self) -> list[str]: + return [ + "gpt-3.5-turbo", + "gpt-4", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + ] + + def function_call_supported(self) -> bool: + return False + + def stream_mode_supported(self) -> bool: + return True + + def multi_round_supported(self) -> bool: + return 
True + + @classmethod + def config_comment(cls) -> str: + return \ + """ + You should provide cookie string as `cookie` in config: + { + "cookie": "your cookie string" + } + + """ + + @classmethod + def supported_path(cls) -> str: + return "/v1/chat/completions" + + chatbot: kimi.Chatbot + + def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation): + self.config = config + self.eval = eval + self.chatbot = kimi.Chatbot( + cookies_str=config['cookie'] + ) + + async def test(self) -> typing.Union[bool, str]: + try: + resp =self.chatbot.ask( + prompt="Hello, reply 'hi' only.", + conversation_id="", # 会话ID(不填则会新建) + timeout=10, # 超时时间(默认10秒 + use_search=False # 是否使用搜索 + ) + + return True, "" + except Exception as e: + traceback.print_exc() + return False, str(e) + + async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]: + prompt = "" + + for msg in req.messages: + prompt += f"{msg['role']}: {msg['content']}\n" + + prompt += "assistant: " + + random_int = random.randint(0, 1000000000) + + resp =self.chatbot.ask( + prompt=prompt, + conversation_id="", # 会话ID(不填则会新建) + timeout=10, # 超时时间(默认10秒 + use_search=True # 是否使用搜索 + ) + + yield response.Response( + id=random_int, + finish_reason=response.FinishReason.NULL, + normal_message=resp['text'], + function_call=None + ) diff --git a/free_one_api/impls/adapter/qianwen.py b/free_one_api/impls/adapter/qianwen.py index 413dd50..99381f1 100644 --- a/free_one_api/impls/adapter/qianwen.py +++ b/free_one_api/impls/adapter/qianwen.py @@ -15,11 +15,11 @@ @adapter.llm_adapter class QianWenAdapter(llm.LLMLibAdapter): - + @classmethod def name(cls) -> str: return "xw5xr6/revTongYi" - + @classmethod def description(self) -> str: return "Use leeeduke/revTongYi to access Aliyun TongYi QianWen." 
@@ -27,7 +27,21 @@ def description(self) -> str: def supported_models(self) -> list[str]: return [ "gpt-3.5-turbo", - "gpt-4" + "gpt-4", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", ] def function_call_supported(self) -> bool: @@ -38,84 +52,83 @@ def stream_mode_supported(self) -> bool: def multi_round_supported(self) -> bool: return True - + @classmethod def config_comment(cls) -> str: return \ -"""RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/ -You should provide cookie string as `cookie` in config: -{ - "cookie": "your cookie string" -} - -Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi -""" + """RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/ + You should provide cookie string as `cookie` in config: + { + "cookie": "your cookie string" + } + + Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi + """ @classmethod def supported_path(cls) -> str: return "/v1/chat/completions" - + chatbot: qwen.Chatbot - + def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation): self.config = config self.eval = eval self.chatbot = qwen.Chatbot( cookies_str=config['cookie'] ) - + async def test(self) -> typing.Union[bool, str]: try: # self.chatbot.create_session("Hello, reply 'hi' only.") self.chatbot.sessionId = "" resp = self.chatbot.ask( - "Hello, reply 'hi' only.", - sessionId="" + "Hello, reply 'hi' only." 
) - - self.chatbot.delete_session(resp.sessionId) - + print(resp) + self.chatbot.delete_session(resp['sessionId']) + return True, "" except Exception as e: traceback.print_exc() return False, str(e) - + async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]: prompt = "" - + for msg in req.messages: prompt += f"{msg['role']}: {msg['content']}\n" - + prompt += "assistant: " - + random_int = random.randint(0, 1000000000) - + prev_text = "" sessionId = "" self.chatbot.sessionId = "" - + for resp in self.chatbot.ask( - prompt=prompt, - sessionId="", - stream=True, + prompt=prompt, + # sessionId="", + stream=True, ): - if resp.contents == None or len(resp.contents) == 0: + if resp['contents'] == None or len(resp['contents']) == 0: continue - sessionId = resp.sessionId - + sessionId = resp['sessionId'] + yield response.Response( id=random_int, finish_reason=response.FinishReason.NULL, - normal_message=resp.contents[0].content.replace(prev_text, ""), + normal_message=resp['contents'][0]['content'].replace(prev_text, ""), function_call=None ) - prev_text = resp.contents[0].content - + prev_text = resp['contents'][0]['content'] + self.chatbot.delete_session(sessionId) - + yield response.Response( id=random_int, finish_reason=response.FinishReason.STOP, diff --git a/free_one_api/impls/adapter/re_gpt.py b/free_one_api/impls/adapter/re_gpt.py index b029979..3eb0c32 100644 --- a/free_one_api/impls/adapter/re_gpt.py +++ b/free_one_api/impls/adapter/re_gpt.py @@ -15,7 +15,7 @@ @adapter.llm_adapter class ReGPTAdapter(llm.LLMLibAdapter): - + @classmethod def name(cls) -> str: return "Zai-Kun/reverse-engineered-chatgpt" @@ -23,7 +23,7 @@ def name(cls) -> str: @classmethod def description(self) -> str: return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition." 
- + def supported_models(self) -> list[str]: return [ "gpt-3.5-turbo", @@ -32,13 +32,13 @@ def function_call_supported(self) -> bool: return False - + def stream_mode_supported(self) -> bool: return True def multi_round_supported(self) -> bool: return True - + @classmethod def config_comment(cls) -> str: return \ @@ -87,10 +87,10 @@ async def query( req: request.Request ) -> typing.AsyncGenerator[response.Response, None]: prompt = "" - + for msg in req.messages: prompt += f"{msg['role']}: {msg['content']}\n" - + prompt += "assistant: " random_int = random.randint(0, 1000000) diff --git a/free_one_api/impls/adapter/tiangong.py b/free_one_api/impls/adapter/tiangong.py new file mode 100644 index 0000000..b7b06de --- /dev/null +++ b/free_one_api/impls/adapter/tiangong.py @@ -0,0 +1,111 @@ +import asyncio +import typing +import traceback +import uuid +import random + +import revTianGong.tiangong as tiangong + +from free_one_api.entities import request, response + +from ...models import adapter +from ...models.adapter import llm +from ...entities import request, response, exceptions +from ...models.channel import evaluation + + +@adapter.llm_adapter +class TianGongAdapter(llm.LLMLibAdapter): + + @classmethod + def name(cls) -> str: + return "DrTang/revTiangong" + + @classmethod + def description(self) -> str: + return "Use dd123-a/revTiangong to access Tiangong web chat." + + def supported_models(self) -> list[str]: + return [ + "gpt-3.5-turbo", + "gpt-4", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + ] + + def function_call_supported(self) -> bool: + return False + + def stream_mode_supported(self) -> bool: + return True + + def multi_round_supported(self) -> bool: + return True + + @classmethod + def config_comment(cls) -> 
str: + return \ + """ + You should provide cookie string as `cookie` in config: + { + "cookie": "your cookie string" + } + + """ + + @classmethod + def supported_path(cls) -> str: + return "/v1/chat/completions" + + chatbot: tiangong.Chatbot + + def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation): + self.config = config + self.eval = eval + self.chatbot = tiangong.Chatbot( + cookies_str=config['cookie'] + ) + + async def test(self) -> typing.Union[bool, str]: + try: + resp =await self.chatbot.ask( + prompt="Hello, reply 'hi' only." + ) + + return True, "" + except Exception as e: + traceback.print_exc() + return False, str(e) + + async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]: + prompt = "" + + for msg in req.messages: + prompt += f"{msg['role']}: {msg['content']}\n" + + prompt += "assistant: " + + random_int = random.randint(0, 1000000000) + + resp =await (self.chatbot.ask( + prompt=prompt, + )) + + yield response.Response( + id=random_int, + finish_reason=response.FinishReason.NULL, + normal_message=resp['texts'], + function_call=None + ) diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py index e27fdff..ff7c05c 100644 --- a/free_one_api/impls/app.py +++ b/free_one_api/impls/app.py @@ -22,6 +22,8 @@ from .adapter import gpt4free from .adapter import hugchat from .adapter import qianwen +from .adapter import tiangong +from .adapter import kimi from .adapter import re_gpt from . 
import log @@ -39,22 +41,22 @@ class Application: channel: chanmgr.AbsChannelManager """Channel manager.""" - + key: keymgr.AbsAPIKeyManager """API Key manager.""" - + watchdog: wdmgr.AbsWatchDog - + logging_level: int = logging.INFO - + def __init__( - self, - dbmgr: db.DatabaseInterface, - router: routermgr.RouterManager, - channel: chanmgr.AbsChannelManager, - key: keymgr.AbsAPIKeyManager, - watchdog: wdmgr.AbsWatchDog, - logging_level: int = logging.INFO, + self, + dbmgr: db.DatabaseInterface, + router: routermgr.RouterManager, + channel: chanmgr.AbsChannelManager, + key: keymgr.AbsAPIKeyManager, + watchdog: wdmgr.AbsWatchDog, + logging_level: int = logging.INFO, ): self.dbmgr = dbmgr self.router = router @@ -62,15 +64,16 @@ def __init__( self.key = key self.watchdog = watchdog self.logging_level = logging_level - + async def run(self): """Run application.""" loop = asyncio.get_running_loop() - + loop.create_task(self.watchdog.run()) - + await self.router.serve(loop) + log_colors_config = { 'DEBUG': 'green', # cyan white 'INFO': 'white', @@ -117,6 +120,7 @@ async def run(self): } } + async def make_application(config_path: str) -> Application: """Make application.""" if not os.path.exists(config_path): @@ -131,72 +135,73 @@ async def make_application(config_path: str) -> Application: # complete config config = cfgutil.complete_config(config, default_config) - + # dump config with open(config_path, "w") as f: yaml.dump(config, f) # logging logging_level = logging.INFO - + if 'logging' in config and 'debug' in config['logging'] and config['logging']['debug']: logging_level = logging.DEBUG - + if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'true': logging_level = logging.DEBUG - + print("Logging level:", logging_level) logging.debug("Debug mode enabled.") - + terminal_out = logging.StreamHandler() - + terminal_out.setLevel(logging_level) terminal_out.setFormatter(colorlog.ColoredFormatter( "[%(asctime)s.%(msecs)03d] %(log_color)s%(pathname)s (%(lineno)d) - 
[%(levelname)s] :\n" - "%(message)s", + "%(message)s", datefmt="%Y-%m-%d %H:%M:%S", log_colors=log_colors_config, )) - + for handler in logging.getLogger().handlers: logging.getLogger().removeHandler(handler) - + logging.getLogger().addHandler(terminal_out) # save ad to runtime if 'random_ad' in config and config['random_ad']['enabled']: from ..common import randomad - + randomad.enabled = config['random_ad']['enabled'] randomad.rate = config['random_ad']['rate'] randomad.ads = config['random_ad']['ad_list'] - + from ..common import randomad # make database manager from .database import sqlite as sqlitedb - + dbmgr_cls_mapping = { "sqlite": sqlitedb.SQLiteDB, } - + dbmgr = dbmgr_cls_mapping[config['database']['type']](config['database']) await dbmgr.initialize() - + # database handler dblogger = log.SQLiteHandler(dbmgr) - + # failed to set debug level for db handler dblogger.setLevel(logging.INFO if logging_level <= logging.INFO else logging_level) - dblogger.setFormatter(logging.Formatter("[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s")) - + dblogger.setFormatter( + logging.Formatter("[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s")) + logging.getLogger().addHandler(dblogger) # set default values # apply adapters config if 'misc' in config and 'chatgpt_api_base' in config['misc']: # backward compatibility config['adapters']['acheong08_ChatGPT']['reverse_proxy'] = config['misc']['chatgpt_api_base'] - + adapter_config_mapping = { "acheong08_ChatGPT": revChatGPT.RevChatGPTAdapter, "KoushikNavuluri_Claude-API": claude.ClaudeAdapter, @@ -204,6 +209,8 @@ async def make_application(config_path: str) -> Application: "xtekky_gpt4free": gpt4free.GPT4FreeAdapter, "Soulter_hugging-chat-api": hugchat.HuggingChatAdapter, "xw5xr6_revTongYi": qianwen.QianWenAdapter, + "DrTang_revTiangong": tiangong.TianGongAdapter, + "DrTang_revKimi": kimi.KimiAdapter, "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter, } @@ -213,66 +220,66
@@ async def make_application(config_path: str) -> Application: for k, v in config["adapters"][adapter_name].items(): setattr(adapter_config_mapping[adapter_name], k, v) - + # make channel manager from .channel import mgr as chanmgr - + channelmgr = chanmgr.ChannelManager(dbmgr) await channelmgr.load_channels() - + # make key manager from .key import mgr as keymgr - + apikeymgr = keymgr.APIKeyManager(dbmgr) await apikeymgr.list_keys() - + # make forward manager from .forward import mgr as forwardmgr - + fwdmgr = forwardmgr.ForwardManager(channelmgr, apikeymgr) - + # make router manager from .router import mgr as routermgr - + # import all api groups from .router import forward as forwardgroup from .router import api as apigroup from .router import web as webgroup - + # ========= API Groups ========= group_forward = forwardgroup.ForwardAPIGroup(dbmgr, channelmgr, apikeymgr, fwdmgr) group_api = apigroup.WebAPIGroup(dbmgr, channelmgr, apikeymgr) group_api.tokens = [crypto.md5_digest(config['router']['token'])] group_web = webgroup.WebPageGroup(config['web'], config['router']) - + paths = [] - + paths += group_forward.get_routers() paths += group_web.get_routers() paths += group_api.get_routers() - + # ========= API Groups ========= - + routermgr = routermgr.RouterManager( routes=paths, config=config['router'], ) - + # watchdog and tasks from .watchdog import wd as watchdog - + wdmgr = watchdog.WatchDog() - + # tasks from .watchdog.tasks import heartbeat - + hbtask = heartbeat.HeartBeatTask( channelmgr, config['watchdog']['heartbeat'], ) - + wdmgr.add_task(hbtask) - + app = Application( dbmgr=dbmgr, router=routermgr, @@ -281,8 +288,7 @@ async def make_application(config_path: str) -> Application: watchdog=wdmgr, logging_level=logging_level, ) - + logging.info("Application initialized.") - + return app - \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 85d0b2b..2a2473b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,4 +9,6 @@ 
hugchat g4f revTongYi colorlog -git+https://github.com/Zai-Kun/reverse-engineered-chatgpt \ No newline at end of file +git+https://github.com/Zai-Kun/reverse-engineered-chatgpt +git+https://github.com/dd123-a/revTiangong.git +git+https://github.com/dd123-a/revkimi.git \ No newline at end of file diff --git a/web/package-lock.json b/web/package-lock.json index bef9f52..47e881b 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -804,9 +804,9 @@ "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" }, "node_modules/follow-redirects": { - "version": "1.15.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", - "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "version": "1.15.5", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", + "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", "funding": [ { "type": "individual",