From b6bd8cbe2390fec6a442bee5919ea827e5010792 Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 15:38:39 +0800
Subject: [PATCH 1/9] =?UTF-8?q?=E9=80=82=E9=85=8D=E6=96=B0=E7=9A=84revtong?=
=?UTF-8?q?yi=EF=BC=8C=E6=B7=BB=E5=8A=A0=E5=A4=A9=E5=B7=A5ai=E6=94=AF?=
=?UTF-8?q?=E6=8C=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.idea/.gitignore | 8 +
.idea/dataSources.xml | 17 ++
.idea/deployment.xml | 21 ++
.idea/free-one-api.iml | 12 +
.idea/inspectionProfiles/Project_Default.xml | 30 +++
.../inspectionProfiles/profiles_settings.xml | 6 +
.idea/misc.xml | 4 +
.idea/modules.xml | 8 +
.idea/vcs.xml | 6 +
free_one_api/entities/channel.py | 2 +-
free_one_api/impls/adapter/qianwen.py | 85 +++---
free_one_api/impls/adapter/re_gpt.py | 250 +++++++++---------
free_one_api/impls/adapter/tiangong.py | 111 ++++++++
free_one_api/impls/app.py | 114 ++++----
requirements.txt | 3 +-
revTianGong/__init__.py | 0
revTianGong/entity.py | 161 +++++++++++
revTianGong/errors.py | 2 +
revTianGong/tiangong.py | 221 ++++++++++++++++
web/package-lock.json | 6 +-
20 files changed, 845 insertions(+), 222 deletions(-)
create mode 100644 .idea/.gitignore
create mode 100644 .idea/dataSources.xml
create mode 100644 .idea/deployment.xml
create mode 100644 .idea/free-one-api.iml
create mode 100644 .idea/inspectionProfiles/Project_Default.xml
create mode 100644 .idea/inspectionProfiles/profiles_settings.xml
create mode 100644 .idea/misc.xml
create mode 100644 .idea/modules.xml
create mode 100644 .idea/vcs.xml
create mode 100644 free_one_api/impls/adapter/tiangong.py
create mode 100644 revTianGong/__init__.py
create mode 100644 revTianGong/entity.py
create mode 100644 revTianGong/errors.py
create mode 100644 revTianGong/tiangong.py
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..35410ca
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# 默认忽略的文件
+/shelf/
+/workspace.xml
+# 基于编辑器的 HTTP 客户端请求
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
new file mode 100644
index 0000000..e411390
--- /dev/null
+++ b/.idea/dataSources.xml
@@ -0,0 +1,17 @@
+
+
+
+
+ sqlite.xerial
+ true
+ org.sqlite.JDBC
+ jdbc:sqlite:C:\Users\DrTang\Desktop\free-one-api\data\free_one_api.db
+ $ProjectFileDir$
+
+
+ file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.43.0/org/xerial/sqlite-jdbc/3.43.0.0/sqlite-jdbc-3.43.0.0.jar
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/deployment.xml b/.idea/deployment.xml
new file mode 100644
index 0000000..791df0d
--- /dev/null
+++ b/.idea/deployment.xml
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/free-one-api.iml b/.idea/free-one-api.iml
new file mode 100644
index 0000000..ff54199
--- /dev/null
+++ b/.idea/free-one-api.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..3185820
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,30 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..43fd3cc
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..4cb1a12
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/free_one_api/entities/channel.py b/free_one_api/entities/channel.py
index a53d70e..2f46c01 100644
--- a/free_one_api/entities/channel.py
+++ b/free_one_api/entities/channel.py
@@ -80,7 +80,7 @@ def count_tokens(
num_tokens = 0
for message in messages:
for key, value in message.items():
- num_tokens += len(encoding.encode(value))
+ num_tokens += len(encoding.encode(str(value)))
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
diff --git a/free_one_api/impls/adapter/qianwen.py b/free_one_api/impls/adapter/qianwen.py
index 413dd50..99381f1 100644
--- a/free_one_api/impls/adapter/qianwen.py
+++ b/free_one_api/impls/adapter/qianwen.py
@@ -15,11 +15,11 @@
@adapter.llm_adapter
class QianWenAdapter(llm.LLMLibAdapter):
-
+
@classmethod
def name(cls) -> str:
return "xw5xr6/revTongYi"
-
+
@classmethod
def description(self) -> str:
return "Use leeeduke/revTongYi to access Aliyun TongYi QianWen."
@@ -27,7 +27,21 @@ def description(self) -> str:
def supported_models(self) -> list[str]:
return [
"gpt-3.5-turbo",
- "gpt-4"
+ "gpt-4",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
]
def function_call_supported(self) -> bool:
@@ -38,84 +52,83 @@ def stream_mode_supported(self) -> bool:
def multi_round_supported(self) -> bool:
return True
-
+
@classmethod
def config_comment(cls) -> str:
return \
-"""RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/
-You should provide cookie string as `cookie` in config:
-{
- "cookie": "your cookie string"
-}
-
-Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi
-"""
+ """RevTongYi use cookies that can be extracted from https://qianwen.aliyun.com/
+ You should provide cookie string as `cookie` in config:
+ {
+ "cookie": "your cookie string"
+ }
+
+ Method of getting cookie string, please refer to https://github.com/leeeduke/revTongYi
+ """
@classmethod
def supported_path(cls) -> str:
return "/v1/chat/completions"
-
+
chatbot: qwen.Chatbot
-
+
def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
self.config = config
self.eval = eval
self.chatbot = qwen.Chatbot(
cookies_str=config['cookie']
)
-
+
async def test(self) -> typing.Union[bool, str]:
try:
# self.chatbot.create_session("Hello, reply 'hi' only.")
self.chatbot.sessionId = ""
resp = self.chatbot.ask(
- "Hello, reply 'hi' only.",
- sessionId=""
+ "Hello, reply 'hi' only."
)
-
- self.chatbot.delete_session(resp.sessionId)
-
+ print(resp)
+ self.chatbot.delete_session(resp['sessionId'])
+
return True, ""
except Exception as e:
traceback.print_exc()
return False, str(e)
-
+
async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
prompt = ""
-
+
for msg in req.messages:
prompt += f"{msg['role']}: {msg['content']}\n"
-
+
prompt += "assistant: "
-
+
random_int = random.randint(0, 1000000000)
-
+
prev_text = ""
sessionId = ""
self.chatbot.sessionId = ""
-
+
for resp in self.chatbot.ask(
- prompt=prompt,
- sessionId="",
- stream=True,
+ prompt=prompt,
+ # sessionId="",
+ stream=True,
):
- if resp.contents == None or len(resp.contents) == 0:
+ if resp['contents'] == None or len(resp['contents']) == 0:
continue
- sessionId = resp.sessionId
-
+ sessionId = resp['sessionId']
+
yield response.Response(
id=random_int,
finish_reason=response.FinishReason.NULL,
- normal_message=resp.contents[0].content.replace(prev_text, ""),
+ normal_message=resp['contents'][0]['content'].replace(prev_text, ""),
function_call=None
)
- prev_text = resp.contents[0].content
-
+ prev_text = resp['contents'][0]['content']
+
self.chatbot.delete_session(sessionId)
-
+
yield response.Response(
id=random_int,
finish_reason=response.FinishReason.STOP,
diff --git a/free_one_api/impls/adapter/re_gpt.py b/free_one_api/impls/adapter/re_gpt.py
index b029979..35b09bf 100644
--- a/free_one_api/impls/adapter/re_gpt.py
+++ b/free_one_api/impls/adapter/re_gpt.py
@@ -1,125 +1,125 @@
-import typing
-import traceback
-import uuid
-import random
-
-import requests
-import re_gpt
-
-from ...models import adapter
-from ...models.adapter import llm
-from ...entities import request
-from ...entities import response, exceptions
-from ...models.channel import evaluation
-
-
-@adapter.llm_adapter
-class ReGPTAdapter(llm.LLMLibAdapter):
-
- @classmethod
- def name(cls) -> str:
- return "Zai-Kun/reverse-engineered-chatgpt"
-
- @classmethod
- def description(self) -> str:
- return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition."
-
- def supported_models(self) -> list[str]:
- return [
- "gpt-3.5-turbo",
- "gpt-4"
- ]
-
- def function_call_supported(self) -> bool:
- return False
-
- def stream_mode_supported(self) -> bool:
- return True
-
- def multi_round_supported(self) -> bool:
- return True
-
- @classmethod
- def config_comment(cls) -> str:
- return \
-"""Please provide `session_token` to config as:
-
-{
- "session_token": "your session"
-}
-
-Session token can be found from the cookies named `__Secure-next-auth.session-token` in the browser.
-"""
-
- _chatbot: re_gpt.SyncChatGPT = None
-
- @property
- def chatbot(self) -> re_gpt.SyncChatGPT:
- if self._chatbot is None:
- self._chatbot = re_gpt.SyncChatGPT(**self.config)
- return self._chatbot
-
- @classmethod
- def supported_path(self) -> str:
- return "/v1/chat/completions"
-
- def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
- self.config = config
- self.eval = eval
-
- async def test(self) -> typing.Union[bool, str]:
-
- with self.chatbot as chatbot:
- conversation = chatbot.create_new_conversation()
-
- try:
- for message in conversation.chat("Hi, respond 'hello, world!' please."):
- pass
-
- return True, ''
- except Exception as e:
- return False, str(e)
- finally:
- chatbot.delete_conversation(conversation.conversation_id)
-
- async def query(
- self,
- req: request.Request
- ) -> typing.AsyncGenerator[response.Response, None]:
- prompt = ""
-
- for msg in req.messages:
- prompt += f"{msg['role']}: {msg['content']}\n"
-
- prompt += "assistant: "
-
- random_int = random.randint(0, 1000000)
-
- with self.chatbot as chatbot:
- conversation = chatbot.create_new_conversation()
- try:
-
- for message in conversation.chat(
- user_input=prompt
- ):
- if message["content"] == "":
- continue
-
- yield response.Response(
- id=random_int,
- finish_reason=response.FinishReason.NULL,
- normal_message=message["content"],
- function_call=None
- )
- except Exception as e:
- traceback.print_exc()
- raise e
- finally:
- chatbot.delete_conversation(conversation.conversation_id)
-
- yield response.Response(
- id=random_int,
- finish_reason=response.FinishReason.STOP,
- normal_message="",
- function_call=None
- )
\ No newline at end of file
+# import typing
+# import traceback
+# import uuid
+# import random
+#
+# import requests
+# import re_gpt
+#
+# from ...models import adapter
+# from ...models.adapter import llm
+# from ...entities import request
+# from ...entities import response, exceptions
+# from ...models.channel import evaluation
+#
+#
+# @adapter.llm_adapter
+# class ReGPTAdapter(llm.LLMLibAdapter):
+#
+# @classmethod
+# def name(cls) -> str:
+# return "Zai-Kun/reverse-engineered-chatgpt"
+#
+# @classmethod
+# def description(self) -> str:
+# return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition."
+#
+# def supported_models(self) -> list[str]:
+# return [
+# "gpt-3.5-turbo",
+# "gpt-4"
+# ]
+#
+# def function_call_supported(self) -> bool:
+# return False
+#
+# def stream_mode_supported(self) -> bool:
+# return True
+#
+# def multi_round_supported(self) -> bool:
+# return True
+#
+# @classmethod
+# def config_comment(cls) -> str:
+# return \
+# """Please provide `session_token` to config as:
+#
+# {
+# "session_token": "your session"
+# }
+#
+# Session token can be found from the cookies named `__Secure-next-auth.session-token` in the browser.
+# """
+#
+# _chatbot: re_gpt.SyncChatGPT = None
+#
+# @property
+# def chatbot(self) -> re_gpt.SyncChatGPT:
+# if self._chatbot is None:
+# self._chatbot = re_gpt.SyncChatGPT(**self.config)
+# return self._chatbot
+#
+# @classmethod
+# def supported_path(self) -> str:
+# return "/v1/chat/completions"
+#
+# def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
+# self.config = config
+# self.eval = eval
+#
+# async def test(self) -> typing.Union[bool, str]:
+#
+# with self.chatbot as chatbot:
+# conversation = chatbot.create_new_conversation()
+#
+# try:
+# for message in conversation.chat("Hi, respond 'hello, world!' please."):
+# pass
+#
+# return True, ''
+# except Exception as e:
+# return False, str(e)
+# finally:
+# chatbot.delete_conversation(conversation.conversation_id)
+#
+# async def query(
+# self,
+# req: request.Request
+# ) -> typing.AsyncGenerator[response.Response, None]:
+# prompt = ""
+#
+# for msg in req.messages:
+# prompt += f"{msg['role']}: {msg['content']}\n"
+#
+# prompt += "assistant: "
+#
+# random_int = random.randint(0, 1000000)
+#
+# with self.chatbot as chatbot:
+# conversation = chatbot.create_new_conversation()
+# try:
+#
+# for message in conversation.chat(
+# user_input=prompt
+# ):
+# if message["content"] == "":
+# continue
+#
+# yield response.Response(
+# id=random_int,
+# finish_reason=response.FinishReason.NULL,
+# normal_message=message["content"],
+# function_call=None
+# )
+# except Exception as e:
+# traceback.print_exc()
+# raise e
+# finally:
+# chatbot.delete_conversation(conversation.conversation_id)
+#
+# yield response.Response(
+# id=random_int,
+# finish_reason=response.FinishReason.STOP,
+# normal_message="",
+# function_call=None
+# )
\ No newline at end of file
diff --git a/free_one_api/impls/adapter/tiangong.py b/free_one_api/impls/adapter/tiangong.py
new file mode 100644
index 0000000..c6ea5a6
--- /dev/null
+++ b/free_one_api/impls/adapter/tiangong.py
@@ -0,0 +1,111 @@
+import asyncio
+import typing
+import traceback
+import uuid
+import random
+
+import revTianGong.tiangong as tiangong
+
+from free_one_api.entities import request, response
+
+from ...models import adapter
+from ...models.adapter import llm
+from ...entities import request, response, exceptions
+from ...models.channel import evaluation
+
+
+@adapter.llm_adapter
+class QianWenAdapter(llm.LLMLibAdapter):
+
+ @classmethod
+ def name(cls) -> str:
+ return "DrTang/revTiangong"
+
+ @classmethod
+ def description(self) -> str:
+ return "suck my dick"
+
+ def supported_models(self) -> list[str]:
+ return [
+ "gpt-3.5-turbo",
+ "gpt-4",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ ]
+
+ def function_call_supported(self) -> bool:
+ return False
+
+ def stream_mode_supported(self) -> bool:
+ return True
+
+ def multi_round_supported(self) -> bool:
+ return True
+
+ @classmethod
+ def config_comment(cls) -> str:
+ return \
+ """
+ You should provide cookie string as `cookie` in config:
+ {
+ "cookie": "your cookie string"
+ }
+
+ """
+
+ @classmethod
+ def supported_path(cls) -> str:
+ return "/v1/chat/completions"
+
+ chatbot: tiangong.Chatbot
+
+ def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
+ self.config = config
+ self.eval = eval
+ self.chatbot = tiangong.Chatbot(
+ cookies_str=config['cookie']
+ )
+
+ async def test(self) -> typing.Union[bool, str]:
+ try:
+ resp =await self.chatbot.ask(
+ prompt="Hello, reply 'hi' only."
+ )
+
+ return True, ""
+ except Exception as e:
+ traceback.print_exc()
+ return False, str(e)
+
+ async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
+ prompt = ""
+
+ for msg in req.messages:
+ prompt += f"{msg['role']}: {msg['content']}\n"
+
+ prompt += "assistant: "
+
+ random_int = random.randint(0, 1000000000)
+
+ resp =await (self.chatbot.ask(
+ prompt=prompt,
+ ))
+
+ yield response.Response(
+ id=random_int,
+ finish_reason=response.FinishReason.NULL,
+ normal_message=resp['texts'],
+ function_call=None
+ )
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index e27fdff..9a69d6b 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -22,6 +22,7 @@
from .adapter import gpt4free
from .adapter import hugchat
from .adapter import qianwen
+from .adapter import tiangong
from .adapter import re_gpt
from . import log
@@ -39,22 +40,22 @@ class Application:
channel: chanmgr.AbsChannelManager
"""Channel manager."""
-
+
key: keymgr.AbsAPIKeyManager
"""API Key manager."""
-
+
watchdog: wdmgr.AbsWatchDog
-
+
logging_level: int = logging.INFO
-
+
def __init__(
- self,
- dbmgr: db.DatabaseInterface,
- router: routermgr.RouterManager,
- channel: chanmgr.AbsChannelManager,
- key: keymgr.AbsAPIKeyManager,
- watchdog: wdmgr.AbsWatchDog,
- logging_level: int = logging.INFO,
+ self,
+ dbmgr: db.DatabaseInterface,
+ router: routermgr.RouterManager,
+ channel: chanmgr.AbsChannelManager,
+ key: keymgr.AbsAPIKeyManager,
+ watchdog: wdmgr.AbsWatchDog,
+ logging_level: int = logging.INFO,
):
self.dbmgr = dbmgr
self.router = router
@@ -62,15 +63,16 @@ def __init__(
self.key = key
self.watchdog = watchdog
self.logging_level = logging_level
-
+
async def run(self):
"""Run application."""
loop = asyncio.get_running_loop()
-
+
loop.create_task(self.watchdog.run())
-
+
await self.router.serve(loop)
+
log_colors_config = {
'DEBUG': 'green', # cyan white
'INFO': 'white',
@@ -117,6 +119,7 @@ async def run(self):
}
}
+
async def make_application(config_path: str) -> Application:
"""Make application."""
if not os.path.exists(config_path):
@@ -131,72 +134,73 @@ async def make_application(config_path: str) -> Application:
# complete config
config = cfgutil.complete_config(config, default_config)
-
+
# dump config
with open(config_path, "w") as f:
yaml.dump(config, f)
# logging
logging_level = logging.INFO
-
+
if 'logging' in config and 'debug' in config['logging'] and config['logging']['debug']:
logging_level = logging.DEBUG
-
+
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'true':
logging_level = logging.DEBUG
-
+
print("Logging level:", logging_level)
logging.debug("Debug mode enabled.")
-
+
terminal_out = logging.StreamHandler()
-
+
terminal_out.setLevel(logging_level)
terminal_out.setFormatter(colorlog.ColoredFormatter(
"[%(asctime)s.%(msecs)03d] %(log_color)s%(pathname)s (%(lineno)d) - [%(levelname)s] :\n"
- "%(message)s",
+ "%(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
log_colors=log_colors_config,
))
-
+
for handler in logging.getLogger().handlers:
logging.getLogger().removeHandler(handler)
-
+
logging.getLogger().addHandler(terminal_out)
# save ad to runtime
if 'random_ad' in config and config['random_ad']['enabled']:
from ..common import randomad
-
+
randomad.enabled = config['random_ad']['enabled']
randomad.rate = config['random_ad']['rate']
randomad.ads = config['random_ad']['ad_list']
-
+
from ..common import randomad
# make database manager
from .database import sqlite as sqlitedb
-
+
dbmgr_cls_mapping = {
"sqlite": sqlitedb.SQLiteDB,
}
-
+
dbmgr = dbmgr_cls_mapping[config['database']['type']](config['database'])
await dbmgr.initialize()
-
+
# database handler
dblogger = log.SQLiteHandler(dbmgr)
-
+
# failed to set debug level for db handler
dblogger.setLevel(logging.INFO if logging_level <= logging.INFO else logging_level)
- dblogger.setFormatter(logging.Formatter("[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s"))
-
+ dblogger.setFormatter(
+ logging.Formatter("[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s"))
+
logging.getLogger().addHandler(dblogger)
# set default values
# apply adapters config
if 'misc' in config and 'chatgpt_api_base' in config['misc']: # backward compatibility
config['adapters']['acheong08_ChatGPT']['reverse_proxy'] = config['misc']['chatgpt_api_base']
-
+
adapter_config_mapping = {
"acheong08_ChatGPT": revChatGPT.RevChatGPTAdapter,
"KoushikNavuluri_Claude-API": claude.ClaudeAdapter,
@@ -204,7 +208,8 @@ async def make_application(config_path: str) -> Application:
"xtekky_gpt4free": gpt4free.GPT4FreeAdapter,
"Soulter_hugging-chat-api": hugchat.HuggingChatAdapter,
"xw5xr6_revTongYi": qianwen.QianWenAdapter,
- "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
+ "DrTang": tiangong.tiangong
+ # "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
}
for adapter_name in adapter_config_mapping:
@@ -213,66 +218,66 @@ async def make_application(config_path: str) -> Application:
for k, v in config["adapters"][adapter_name].items():
setattr(adapter_config_mapping[adapter_name], k, v)
-
+
# make channel manager
from .channel import mgr as chanmgr
-
+
channelmgr = chanmgr.ChannelManager(dbmgr)
await channelmgr.load_channels()
-
+
# make key manager
from .key import mgr as keymgr
-
+
apikeymgr = keymgr.APIKeyManager(dbmgr)
await apikeymgr.list_keys()
-
+
# make forward manager
from .forward import mgr as forwardmgr
-
+
fwdmgr = forwardmgr.ForwardManager(channelmgr, apikeymgr)
-
+
# make router manager
from .router import mgr as routermgr
-
+
# import all api groups
from .router import forward as forwardgroup
from .router import api as apigroup
from .router import web as webgroup
-
+
# ========= API Groups =========
group_forward = forwardgroup.ForwardAPIGroup(dbmgr, channelmgr, apikeymgr, fwdmgr)
group_api = apigroup.WebAPIGroup(dbmgr, channelmgr, apikeymgr)
group_api.tokens = [crypto.md5_digest(config['router']['token'])]
group_web = webgroup.WebPageGroup(config['web'], config['router'])
-
+
paths = []
-
+
paths += group_forward.get_routers()
paths += group_web.get_routers()
paths += group_api.get_routers()
-
+
# ========= API Groups =========
-
+
routermgr = routermgr.RouterManager(
routes=paths,
config=config['router'],
)
-
+
# watchdog and tasks
from .watchdog import wd as watchdog
-
+
wdmgr = watchdog.WatchDog()
-
+
# tasks
from .watchdog.tasks import heartbeat
-
+
hbtask = heartbeat.HeartBeatTask(
channelmgr,
config['watchdog']['heartbeat'],
)
-
+
wdmgr.add_task(hbtask)
-
+
app = Application(
dbmgr=dbmgr,
router=routermgr,
@@ -281,8 +286,7 @@ async def make_application(config_path: str) -> Application:
watchdog=wdmgr,
logging_level=logging_level,
)
-
+
logging.info("Application initialized.")
-
+
return app
-
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 85d0b2b..94340d5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,5 +8,4 @@ bardapi
hugchat
g4f
revTongYi
-colorlog
-git+https://github.com/Zai-Kun/reverse-engineered-chatgpt
\ No newline at end of file
+colorlog
\ No newline at end of file
diff --git a/revTianGong/__init__.py b/revTianGong/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/revTianGong/entity.py b/revTianGong/entity.py
new file mode 100644
index 0000000..0db6dfa
--- /dev/null
+++ b/revTianGong/entity.py
@@ -0,0 +1,161 @@
+class ChatContent:
+ """
+ 对话内容模版
+ """
+ text: str
+ contentType: str
+ cardId: str
+ author: str
+ createdAt: str
+ options_title: str
+ type: str
+ suggestion: dict
+
+ # 初始化方法,接收一个字典参数,将字典的键值对转化为类的属性
+ def __init__(self, content: dict):
+ self.__dict__ = content
+
+ # 支持通过键名方式获取属性值,类似于字典操作
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ # 支持通过键名方式设置属性值,类似于字典操作
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ # 覆盖默认的字符串转换方法,返回类属性构成的字典字符串形式
+ def __str__(self):
+ return str(self.__dict__)
+
+ # 覆盖默认的呈现方法,返回类属性构成的字典字符串形式
+ def __repr__(self):
+ return str(self.__dict__)
+
+
+class TianGongChatResponse:
+ # 响应内容类型
+ type: str
+ # 一组对话内容,每一项为ChatContent对象
+ contents: list[ChatContent] | None
+ # 消息状态
+ card_type: str
+ # 消息ID
+ target: str
+ # 父消息ID
+ conversation_id: str
+ # 会话ID
+ ask_id: str
+
+ request_id: str
+
+ app_copilot_input: str
+
+ # 初始化方法,接收一个字典参数,将其中的对话响应内容解析为类属性
+ def __init__(self, response: dict):
+ # 将原始响应字典中的部分键值包装进类属性,并将"contents"字段转换为ChatContent对象列表
+ packaged_response = {
+ "type": response["type"],
+ # "card_type": response["card_type"],
+ "target": response["target"],
+ "conversation_id": response["conversation_id"],
+ "ask_id": response["ask_id"],
+ "message": [ChatContent(message) for message in response["arguments"]] if response.get(
+ "arguments") else None,
+ "request_id": response["request_id"],
+ # "app_copilot_input": response["app_copilot_input"]
+ }
+ self.__dict__ = packaged_response
+
+ # 同样支持通过键名方式获取属性值
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ # 同样支持通过键名方式设置属性值
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ # 返回类属性构成的字典字符串形式
+ def __str__(self):
+ return str(self.__dict__)
+
+ # 返回类属性构成的字典字符串形式
+ def __repr__(self):
+ return str(self.__dict__)
+
+
+# 定义历史记录响应类,用于封装一段对话历史记录的信息
+class HistoryResponse:
+ """
+ 历史记录响应模版
+ """
+ # 会话ID
+ sessionId: str
+ # 消息ID
+ msgId: str
+ # 消息状态
+ msgStatus: str
+ # 父消息ID
+ parentMsgId: str
+ # 内容类型
+ contentType: str
+ # 一组对话内容,每一项为ChatContent对象
+ contents: list[ChatContent] | None
+ # 发送者类型,比如用户、机器人等
+ senderType: str
+ # 创建时间戳
+ createTime: int
+
+ # 初始化方法,接收一个字典参数,将历史记录信息解析为类属性
+ def __init__(self, response: dict):
+ # 将历史记录响应字典中的信息包装进类属性,并将"contents"字段转换为ChatContent对象列表
+ packaged_response = {
+ "sessionId": response["sessionId"],
+ "msgId": response["msgId"],
+ "msgStatus": response["msgStatus"],
+ "parentMsgId": response["parentMsgId"],
+ "contentType": response["contentType"],
+ "contents": [ChatContent(content) for content in response["contents"]] if response.get(
+ "contents") else None,
+ "senderType": response["senderType"],
+ "createTime": response["createTime"]
+ }
+ self.__dict__ = packaged_response
+
+ # 同样支持通过键名方式获取属性值
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ # 同样支持通过键名方式设置属性值
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ # 返回类属性构成的字典字符串形式
+ def __str__(self):
+ return str(self.__dict__)
+
+ # 返回类属性构成的字典字符串形式
+ def __repr__(self):
+ return str(self.__dict__)
+
+
+# 定义通用响应类,用于处理非特定格式的一般性响应数据
+class OrdinaryResponse:
+ # 初始化方法,接收一个字典参数,直接将其键值对转换为类属性
+ def __init__(self, response: dict):
+ self.__dict__ = response
+
+ # 同样支持通过键名方式获取属性值
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ # 同样支持通过键名方式设置属性值
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ # 返回类属性构成的字典字符串形式
+ def __str__(self):
+ return str(self.__dict__)
+
+ # 返回类属性构成的字典字符串形式
+ def __repr__(self):
+ return str(self.__dict__)
diff --git a/revTianGong/errors.py b/revTianGong/errors.py
new file mode 100644
index 0000000..3b75f87
--- /dev/null
+++ b/revTianGong/errors.py
@@ -0,0 +1,2 @@
+class TianGongProtocalError(Exception):
+ pass
\ No newline at end of file
diff --git a/revTianGong/tiangong.py b/revTianGong/tiangong.py
new file mode 100644
index 0000000..f9aa778
--- /dev/null
+++ b/revTianGong/tiangong.py
@@ -0,0 +1,221 @@
+import asyncio
+import hashlib
+import json
+import logging
+import typing
+import uuid
+
+import filetype
+import requests
+import websockets
+from fake_useragent import UserAgent
+from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
+
+from . import errors
+from .entity import *
+
+
+def gen_request_id() -> str:
+ """生成requestId"""
+ # uuid无分隔符
+ request_id = uuid.uuid4().hex
+ return request_id
+
+
+class Chatbot:
+ """天工 Chatbot 对象"""
+
+ api_base: str = "wss://work.tiangong.cn/agents_api/chat/ws?device=Web&device_id=825c6b2e8d2ebd9bb4808c55056b969c&device_hash=825c6b2e8d2ebd9bb4808c55056b969c&app_version=1.7.3"
+
+ cookies: dict
+
+ cookies_str: str
+
+ userId: str
+ """Current user id"""
+
+ title: str
+ """Title of current session"""
+
+ sessionId: str = ""
+ """Current session id"""
+
+ parentId: str = "0"
+ """Parent msg id"""
+
+ def __init__(
+ self,
+ cookies: dict = None,
+ cookies_str: str = "",
+ ):
+
+ if cookies and cookies_str:
+ raise ValueError("cookies和cookies_str不能同时存在")
+
+ if cookies:
+ self.cookies = cookies
+ self.cookies_str = ""
+ for key in cookies:
+ self.cookies_str += "{}={}; ".format(key, cookies[key])
+ elif cookies_str:
+ self.cookies_str = cookies_str
+
+ spt = self.cookies_str.split(";")
+
+ self.cookies = {}
+
+ for it in spt:
+ it = it.strip()
+ if it:
+ equ_loc = it.find("=")
+ key = it[:equ_loc]
+ value = it[equ_loc + 1:]
+ self.cookies[key] = value
+
+ logging.debug(self.cookies)
+
+ self.headers = {
+ 'Pragma': 'no-cache',
+ 'Origin': 'https://www.tiangong.cn',
+ 'Accept-Language': 'zh-CN,zh;q=0.9',
+ 'Sec-WebSocket-Key': 'tb2KERRebVx0hf6Yr5HGgA==',
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.3.1311 SLBChan/103',
+ 'Upgrade': 'websocket',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Version': '13',
+ 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits',
+ "Cookie": self.cookies_str
+ }
+
+ async def _stream_ask(
+ self,
+ prompt: str,
+ parentId: str = "0",
+ sessionId: str = "",
+ timeout: int = 60,
+ image: bytes = None
+ ) -> typing.Generator[TianGongChatResponse, None, None]:
+ """流式回复
+
+ Args:
+ prompt (str): 提问内容
+ parentId (str, optional): 父消息id. Defaults to "0".
+ sessionId (str, optional): 对话id. Defaults to "".
+ timeout (int, optional): 超时时间. Defaults to 60.
+ image (bytes, optional): 图片二进制数据. Defaults to None.
+ """
+ if parentId == "0":
+ self.parentId = self.parentId
+
+ headers = self.headers.copy()
+
+ headers['Accept'] = 'text/event-stream'
+
+ data = {
+ "agent_id": "016",
+ "agent_type": "universal",
+ "conversation_id": "6322210d-567b-42bb-983b-6c26f417d2f2",
+ "prompt": {
+ "action": None,
+ "ask_from": "user",
+ "ask_id": None,
+ "content": prompt,
+ "prompt_content": None,
+ "template_id": None,
+ "action": None,
+ "file": None,
+ "template": None,
+ "copilot": False,
+ "bubble_text": None,
+ "publish_agent": None,
+ "copilot_option": None
+ },
+ }
+ async with websockets.connect(self.api_base, extra_headers=headers) as websocket:
+ # 将JSON消息转换为字符串并发送
+ json_message_str = json.dumps(data)
+ await websocket.send(json_message_str)
+
+ while True:
+ try:
+ message = await websocket.recv()
+ data_dict = json.loads(message)
+ if message is not None and data_dict['type'] != 101:
+ result = TianGongChatResponse(data_dict)
+
+ if data_dict['type'] == 2:
+ break
+ yield result
+ except json.JSONDecodeError:
+ # 当接收到的消息无法解析为JSON时的处理
+ print("Received message cannot be decoded as JSON.")
+ break
+ except ConnectionClosedOK as e:
+ raise e
+ break
+ except ConnectionClosedError as e:
+ raise e
+ break
+
+ logging.debug("done: {}".format(result))
+
+ async def _non_stream_ask(
+ self,
+ prompt: str,
+ parentId: str = "0",
+ sessionId: str = "",
+ timeout: int = 60,
+ image: bytes = None
+ ) -> TianGongChatResponse:
+ """非流式回复
+
+ Args:
+ prompt (str): 提问内容
+ parentId (str, optional): 父消息id. Defaults to "0".
+ sessionId (str, optional): 对话id. Defaults to "".
+ timeout (int, optional): 超时时间. Defaults to 60.
+ image (bytes, optional): 图片二进制数据. Defaults to None.
+ """
+
+ result = {
+ 'texts': "",
+ 'suggestion': "",
+ }
+ async for message in self._stream_ask(prompt, parentId, sessionId, timeout, image):
+ # 更新result字典,对于已存在的键,覆盖其值;对于新键,则添加进字典
+ message = message.__dict__
+ if 'message' in message and isinstance(message['message'], list) and message['message']:
+ if 'text' in message['message'][0]['messages'][0]:
+ result['texts'] += str(message['message'][0]['messages'][0]['text'])
+ if 'suggestedResponses' in message['message'][0]['messages'][0]:
+ result['suggestion'] += str(message['message'][0]['messages'][0]['suggestedResponses'])
+ return result
+
+ async def ask(
+ self,
+ prompt: str,
+ parentId: str = "0",
+ sessionId: str = "",
+ timeout: int = 60,
+ stream: bool = False,
+ image: bytes = None
+ ) -> typing.Union[typing.Generator[TianGongChatResponse, None, None], TianGongChatResponse]:
+ """提问
+
+ Args:
+ prompt (str): 提问内容
+ parentId (str, optional): 父消息id. Defaults to "0".
+ sessionId (str, optional): 对话id. Defaults to "".
+ timeout (int, optional): 超时时间. Defaults to 60.
+ stream (bool, optional): 是否流式. Defaults to False.
+ image (bytes, optional): 图片二进制数据. Defaults to None.
+ """
+
+ return await (self._non_stream_ask(
+ prompt,
+ parentId,
+ sessionId,
+ timeout,
+ image
+ ))
diff --git a/web/package-lock.json b/web/package-lock.json
index bef9f52..47e881b 100644
--- a/web/package-lock.json
+++ b/web/package-lock.json
@@ -804,9 +804,9 @@
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="
},
"node_modules/follow-redirects": {
- "version": "1.15.6",
- "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
- "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
+ "version": "1.15.5",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
+ "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==",
"funding": [
{
"type": "individual",
From 62a61fb45c82576944cc566804cf7a3eb3f2df8e Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 15:51:44 +0800
Subject: [PATCH 2/9] =?UTF-8?q?=E7=BB=86=E8=8A=82=E4=BF=AE=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
free_one_api/impls/app.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index 9a69d6b..e9a72a3 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -23,7 +23,6 @@
from .adapter import hugchat
from .adapter import qianwen
from .adapter import tiangong
-from .adapter import re_gpt
from . import log
from . import cfg as cfgutil
From bbe34eaaa79f9905b0354152a1d4314f4523b8ca Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 17:12:18 +0800
Subject: [PATCH 3/9] Restore the re_gpt adapter implementation (un-comment)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
free_one_api/impls/adapter/re_gpt.py | 250 +++++++++++++--------------
1 file changed, 125 insertions(+), 125 deletions(-)
diff --git a/free_one_api/impls/adapter/re_gpt.py b/free_one_api/impls/adapter/re_gpt.py
index 35b09bf..3eb0c32 100644
--- a/free_one_api/impls/adapter/re_gpt.py
+++ b/free_one_api/impls/adapter/re_gpt.py
@@ -1,125 +1,125 @@
-# import typing
-# import traceback
-# import uuid
-# import random
-#
-# import requests
-# import re_gpt
-#
-# from ...models import adapter
-# from ...models.adapter import llm
-# from ...entities import request
-# from ...entities import response, exceptions
-# from ...models.channel import evaluation
-#
-#
-# @adapter.llm_adapter
-# class ReGPTAdapter(llm.LLMLibAdapter):
-#
-# @classmethod
-# def name(cls) -> str:
-# return "Zai-Kun/reverse-engineered-chatgpt"
-#
-# @classmethod
-# def description(self) -> str:
-# return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition."
-#
-# def supported_models(self) -> list[str]:
-# return [
-# "gpt-3.5-turbo",
-# "gpt-4"
-# ]
-#
-# def function_call_supported(self) -> bool:
-# return False
-#
-# def stream_mode_supported(self) -> bool:
-# return True
-#
-# def multi_round_supported(self) -> bool:
-# return True
-#
-# @classmethod
-# def config_comment(cls) -> str:
-# return \
-# """Please provide `session_token` to config as:
-#
-# {
-# "session_token": "your session"
-# }
-#
-# Session token can be found from the cookies named `__Secure-next-auth.session-token` in the browser.
-# """
-#
-# _chatbot: re_gpt.SyncChatGPT = None
-#
-# @property
-# def chatbot(self) -> re_gpt.SyncChatGPT:
-# if self._chatbot is None:
-# self._chatbot = re_gpt.SyncChatGPT(**self.config)
-# return self._chatbot
-#
-# @classmethod
-# def supported_path(self) -> str:
-# return "/v1/chat/completions"
-#
-# def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
-# self.config = config
-# self.eval = eval
-#
-# async def test(self) -> typing.Union[bool, str]:
-#
-# with self.chatbot as chatbot:
-# conversation = chatbot.create_new_conversation()
-#
-# try:
-# for message in conversation.chat("Hi, respond 'hello, world!' please."):
-# pass
-#
-# return True, ''
-# except Exception as e:
-# return False, str(e)
-# finally:
-# chatbot.delete_conversation(conversation.conversation_id)
-#
-# async def query(
-# self,
-# req: request.Request
-# ) -> typing.AsyncGenerator[response.Response, None]:
-# prompt = ""
-#
-# for msg in req.messages:
-# prompt += f"{msg['role']}: {msg['content']}\n"
-#
-# prompt += "assistant: "
-#
-# random_int = random.randint(0, 1000000)
-#
-# with self.chatbot as chatbot:
-# conversation = chatbot.create_new_conversation()
-# try:
-#
-# for message in conversation.chat(
-# user_input=prompt
-# ):
-# if message["content"] == "":
-# continue
-#
-# yield response.Response(
-# id=random_int,
-# finish_reason=response.FinishReason.NULL,
-# normal_message=message["content"],
-# function_call=None
-# )
-# except Exception as e:
-# traceback.print_exc()
-# raise e
-# finally:
-# chatbot.delete_conversation(conversation.conversation_id)
-#
-# yield response.Response(
-# id=random_int,
-# finish_reason=response.FinishReason.STOP,
-# normal_message="",
-# function_call=None
-# )
\ No newline at end of file
+import typing
+import traceback
+import uuid
+import random
+
+import requests
+import re_gpt
+
+from ...models import adapter
+from ...models.adapter import llm
+from ...entities import request
+from ...entities import response, exceptions
+from ...models.channel import evaluation
+
+
+@adapter.llm_adapter
+class ReGPTAdapter(llm.LLMLibAdapter):
+
+ @classmethod
+ def name(cls) -> str:
+ return "Zai-Kun/reverse-engineered-chatgpt"
+
+ @classmethod
+ def description(self) -> str:
+ return "Use Zai-Kun/reverse-engineered-chatgpt to access reverse engineering OpenAI ChatGPT web edition."
+
+ def supported_models(self) -> list[str]:
+ return [
+ "gpt-3.5-turbo",
+ "gpt-4"
+ ]
+
+ def function_call_supported(self) -> bool:
+ return False
+
+ def stream_mode_supported(self) -> bool:
+ return True
+
+ def multi_round_supported(self) -> bool:
+ return True
+
+ @classmethod
+ def config_comment(cls) -> str:
+ return \
+"""Please provide `session_token` to config as:
+
+{
+ "session_token": "your session"
+}
+
+Session token can be found from the cookies named `__Secure-next-auth.session-token` in the browser.
+"""
+
+ _chatbot: re_gpt.SyncChatGPT = None
+
+ @property
+ def chatbot(self) -> re_gpt.SyncChatGPT:
+ if self._chatbot is None:
+ self._chatbot = re_gpt.SyncChatGPT(**self.config)
+ return self._chatbot
+
+ @classmethod
+ def supported_path(self) -> str:
+ return "/v1/chat/completions"
+
+ def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
+ self.config = config
+ self.eval = eval
+
+ async def test(self) -> typing.Union[bool, str]:
+
+ with self.chatbot as chatbot:
+ conversation = chatbot.create_new_conversation()
+
+ try:
+ for message in conversation.chat("Hi, respond 'hello, world!' please."):
+ pass
+
+ return True, ''
+ except Exception as e:
+ return False, str(e)
+ finally:
+ chatbot.delete_conversation(conversation.conversation_id)
+
+ async def query(
+ self,
+ req: request.Request
+ ) -> typing.AsyncGenerator[response.Response, None]:
+ prompt = ""
+
+ for msg in req.messages:
+ prompt += f"{msg['role']}: {msg['content']}\n"
+
+ prompt += "assistant: "
+
+ random_int = random.randint(0, 1000000)
+
+ with self.chatbot as chatbot:
+ conversation = chatbot.create_new_conversation()
+ try:
+
+ for message in conversation.chat(
+ user_input=prompt
+ ):
+ if message["content"] == "":
+ continue
+
+ yield response.Response(
+ id=random_int,
+ finish_reason=response.FinishReason.NULL,
+ normal_message=message["content"],
+ function_call=None
+ )
+ except Exception as e:
+ traceback.print_exc()
+ raise e
+ finally:
+ chatbot.delete_conversation(conversation.conversation_id)
+
+ yield response.Response(
+ id=random_int,
+ finish_reason=response.FinishReason.STOP,
+ normal_message="",
+ function_call=None
+ )
\ No newline at end of file
From 856076e194e039f34526ce51d4a08778e38b40c2 Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 17:32:43 +0800
Subject: [PATCH 4/9] Exclude .idea folder from Git tracking.
---
.gitignore | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.gitignore b/.gitignore
index 0509d67..7924214 100644
--- a/.gitignore
+++ b/.gitignore
@@ -149,6 +149,8 @@ dmypy.json
# pytype static type analyzer
.pytype/
+.idea/
+
# Cython debug symbols
cython_debug/
From 6124b4ba62cccd5f70f37582a6c7e875a4a05acd Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 17:37:12 +0800
Subject: [PATCH 5/9] =?UTF-8?q?=E6=9B=B4=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.idea/.gitignore | 8 -----
.idea/dataSources.xml | 17 -----------
.idea/deployment.xml | 21 -------------
.idea/free-one-api.iml | 12 --------
.idea/inspectionProfiles/Project_Default.xml | 30 -------------------
.../inspectionProfiles/profiles_settings.xml | 6 ----
.idea/misc.xml | 4 ---
.idea/modules.xml | 8 -----
.idea/vcs.xml | 6 ----
free_one_api/impls/app.py | 5 ++--
requirements.txt | 3 +-
11 files changed, 5 insertions(+), 115 deletions(-)
delete mode 100644 .idea/.gitignore
delete mode 100644 .idea/dataSources.xml
delete mode 100644 .idea/deployment.xml
delete mode 100644 .idea/free-one-api.iml
delete mode 100644 .idea/inspectionProfiles/Project_Default.xml
delete mode 100644 .idea/inspectionProfiles/profiles_settings.xml
delete mode 100644 .idea/misc.xml
delete mode 100644 .idea/modules.xml
delete mode 100644 .idea/vcs.xml
diff --git a/.idea/.gitignore b/.idea/.gitignore
deleted file mode 100644
index 35410ca..0000000
--- a/.idea/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-# 默认忽略的文件
-/shelf/
-/workspace.xml
-# 基于编辑器的 HTTP 客户端请求
-/httpRequests/
-# Datasource local storage ignored files
-/dataSources/
-/dataSources.local.xml
diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
deleted file mode 100644
index e411390..0000000
--- a/.idea/dataSources.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-
-
- sqlite.xerial
- true
- org.sqlite.JDBC
- jdbc:sqlite:C:\Users\DrTang\Desktop\free-one-api\data\free_one_api.db
- $ProjectFileDir$
-
-
- file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.43.0/org/xerial/sqlite-jdbc/3.43.0.0/sqlite-jdbc-3.43.0.0.jar
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/deployment.xml b/.idea/deployment.xml
deleted file mode 100644
index 791df0d..0000000
--- a/.idea/deployment.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/free-one-api.iml b/.idea/free-one-api.iml
deleted file mode 100644
index ff54199..0000000
--- a/.idea/free-one-api.iml
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
deleted file mode 100644
index 3185820..0000000
--- a/.idea/inspectionProfiles/Project_Default.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2d..0000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index 43fd3cc..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index 4cb1a12..0000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7..0000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index e9a72a3..2745627 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -23,6 +23,7 @@
from .adapter import hugchat
from .adapter import qianwen
from .adapter import tiangong
+from .adapter import re_gpt
from . import log
from . import cfg as cfgutil
@@ -207,8 +208,8 @@ async def make_application(config_path: str) -> Application:
"xtekky_gpt4free": gpt4free.GPT4FreeAdapter,
"Soulter_hugging-chat-api": hugchat.HuggingChatAdapter,
"xw5xr6_revTongYi": qianwen.QianWenAdapter,
- "DrTang": tiangong.tiangong
- # "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
+ "DrTang": tiangong.tiangong,
+ "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
}
for adapter_name in adapter_config_mapping:
diff --git a/requirements.txt b/requirements.txt
index 94340d5..85d0b2b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,4 +8,5 @@ bardapi
hugchat
g4f
revTongYi
-colorlog
\ No newline at end of file
+colorlog
+git+https://github.com/Zai-Kun/reverse-engineered-chatgpt
\ No newline at end of file
From 941fae5e2e4bbf6b5414e669b4c1c900257e5238 Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Mon, 8 Jul 2024 19:01:44 +0800
Subject: [PATCH 6/9] =?UTF-8?q?=E5=8E=BB=E9=99=A4=E9=A1=B9=E7=9B=AE?=
=?UTF-8?q?=E7=AC=AC=E4=B8=89=E6=96=B9=E5=BA=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
requirements.txt | 3 +-
revTianGong/__init__.py | 0
revTianGong/entity.py | 161 -----------------------------
revTianGong/errors.py | 2 -
revTianGong/tiangong.py | 221 ----------------------------------------
5 files changed, 2 insertions(+), 385 deletions(-)
delete mode 100644 revTianGong/__init__.py
delete mode 100644 revTianGong/entity.py
delete mode 100644 revTianGong/errors.py
delete mode 100644 revTianGong/tiangong.py
diff --git a/requirements.txt b/requirements.txt
index 85d0b2b..2134d28 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,5 @@ hugchat
g4f
revTongYi
colorlog
-git+https://github.com/Zai-Kun/reverse-engineered-chatgpt
\ No newline at end of file
+git+https://github.com/Zai-Kun/reverse-engineered-chatgpt
+git+https://github.com/dd123-a/revTiangong.git
\ No newline at end of file
diff --git a/revTianGong/__init__.py b/revTianGong/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/revTianGong/entity.py b/revTianGong/entity.py
deleted file mode 100644
index 0db6dfa..0000000
--- a/revTianGong/entity.py
+++ /dev/null
@@ -1,161 +0,0 @@
-class ChatContent:
- """
- 对话内容模版
- """
- text: str
- contentType: str
- cardId: str
- author: str
- createdAt: str
- options_title: str
- type: str
- suggestion: dict
-
- # 初始化方法,接收一个字典参数,将字典的键值对转化为类的属性
- def __init__(self, content: dict):
- self.__dict__ = content
-
- # 支持通过键名方式获取属性值,类似于字典操作
- def __getitem__(self, key):
- return getattr(self, key)
-
- # 支持通过键名方式设置属性值,类似于字典操作
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- # 覆盖默认的字符串转换方法,返回类属性构成的字典字符串形式
- def __str__(self):
- return str(self.__dict__)
-
- # 覆盖默认的呈现方法,返回类属性构成的字典字符串形式
- def __repr__(self):
- return str(self.__dict__)
-
-
-class TianGongChatResponse:
- # 响应内容类型
- type: str
- # 一组对话内容,每一项为ChatContent对象
- contents: list[ChatContent] | None
- # 消息状态
- card_type: str
- # 消息ID
- target: str
- # 父消息ID
- conversation_id: str
- # 会话ID
- ask_id: str
-
- request_id: str
-
- app_copilot_input: str
-
- # 初始化方法,接收一个字典参数,将其中的对话响应内容解析为类属性
- def __init__(self, response: dict):
- # 将原始响应字典中的部分键值包装进类属性,并将"contents"字段转换为ChatContent对象列表
- packaged_response = {
- "type": response["type"],
- # "card_type": response["card_type"],
- "target": response["target"],
- "conversation_id": response["conversation_id"],
- "ask_id": response["ask_id"],
- "message": [ChatContent(message) for message in response["arguments"]] if response.get(
- "arguments") else None,
- "request_id": response["request_id"],
- # "app_copilot_input": response["app_copilot_input"]
- }
- self.__dict__ = packaged_response
-
- # 同样支持通过键名方式获取属性值
- def __getitem__(self, key):
- return getattr(self, key)
-
- # 同样支持通过键名方式设置属性值
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- # 返回类属性构成的字典字符串形式
- def __str__(self):
- return str(self.__dict__)
-
- # 返回类属性构成的字典字符串形式
- def __repr__(self):
- return str(self.__dict__)
-
-
-# 定义历史记录响应类,用于封装一段对话历史记录的信息
-class HistoryResponse:
- """
- 历史记录响应模版
- """
- # 会话ID
- sessionId: str
- # 消息ID
- msgId: str
- # 消息状态
- msgStatus: str
- # 父消息ID
- parentMsgId: str
- # 内容类型
- contentType: str
- # 一组对话内容,每一项为ChatContent对象
- contents: list[ChatContent] | None
- # 发送者类型,比如用户、机器人等
- senderType: str
- # 创建时间戳
- createTime: int
-
- # 初始化方法,接收一个字典参数,将历史记录信息解析为类属性
- def __init__(self, response: dict):
- # 将历史记录响应字典中的信息包装进类属性,并将"contents"字段转换为ChatContent对象列表
- packaged_response = {
- "sessionId": response["sessionId"],
- "msgId": response["msgId"],
- "msgStatus": response["msgStatus"],
- "parentMsgId": response["parentMsgId"],
- "contentType": response["contentType"],
- "contents": [ChatContent(content) for content in response["contents"]] if response.get(
- "contents") else None,
- "senderType": response["senderType"],
- "createTime": response["createTime"]
- }
- self.__dict__ = packaged_response
-
- # 同样支持通过键名方式获取属性值
- def __getitem__(self, key):
- return getattr(self, key)
-
- # 同样支持通过键名方式设置属性值
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- # 返回类属性构成的字典字符串形式
- def __str__(self):
- return str(self.__dict__)
-
- # 返回类属性构成的字典字符串形式
- def __repr__(self):
- return str(self.__dict__)
-
-
-# 定义通用响应类,用于处理非特定格式的一般性响应数据
-class OrdinaryResponse:
- # 初始化方法,接收一个字典参数,直接将其键值对转换为类属性
- def __init__(self, response: dict):
- self.__dict__ = response
-
- # 同样支持通过键名方式获取属性值
- def __getitem__(self, key):
- return getattr(self, key)
-
- # 同样支持通过键名方式设置属性值
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- # 返回类属性构成的字典字符串形式
- def __str__(self):
- return str(self.__dict__)
-
- # 返回类属性构成的字典字符串形式
- def __repr__(self):
- return str(self.__dict__)
diff --git a/revTianGong/errors.py b/revTianGong/errors.py
deleted file mode 100644
index 3b75f87..0000000
--- a/revTianGong/errors.py
+++ /dev/null
@@ -1,2 +0,0 @@
-class TianGongProtocalError(Exception):
- pass
\ No newline at end of file
diff --git a/revTianGong/tiangong.py b/revTianGong/tiangong.py
deleted file mode 100644
index f9aa778..0000000
--- a/revTianGong/tiangong.py
+++ /dev/null
@@ -1,221 +0,0 @@
-import asyncio
-import hashlib
-import json
-import logging
-import typing
-import uuid
-
-import filetype
-import requests
-import websockets
-from fake_useragent import UserAgent
-from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
-
-from . import errors
-from .entity import *
-
-
-def gen_request_id() -> str:
- """生成requestId"""
- # uuid无分隔符
- request_id = uuid.uuid4().hex
- return request_id
-
-
-class Chatbot:
- """天工 Chatbot 对象"""
-
- api_base: str = "wss://work.tiangong.cn/agents_api/chat/ws?device=Web&device_id=825c6b2e8d2ebd9bb4808c55056b969c&device_hash=825c6b2e8d2ebd9bb4808c55056b969c&app_version=1.7.3"
-
- cookies: dict
-
- cookies_str: str
-
- userId: str
- """Current user id"""
-
- title: str
- """Title of current session"""
-
- sessionId: str = ""
- """Current session id"""
-
- parentId: str = "0"
- """Parent msg id"""
-
- def __init__(
- self,
- cookies: dict = None,
- cookies_str: str = "",
- ):
-
- if cookies and cookies_str:
- raise ValueError("cookies和cookies_str不能同时存在")
-
- if cookies:
- self.cookies = cookies
- self.cookies_str = ""
- for key in cookies:
- self.cookies_str += "{}={}; ".format(key, cookies[key])
- elif cookies_str:
- self.cookies_str = cookies_str
-
- spt = self.cookies_str.split(";")
-
- self.cookies = {}
-
- for it in spt:
- it = it.strip()
- if it:
- equ_loc = it.find("=")
- key = it[:equ_loc]
- value = it[equ_loc + 1:]
- self.cookies[key] = value
-
- logging.debug(self.cookies)
-
- self.headers = {
- 'Pragma': 'no-cache',
- 'Origin': 'https://www.tiangong.cn',
- 'Accept-Language': 'zh-CN,zh;q=0.9',
- 'Sec-WebSocket-Key': 'tb2KERRebVx0hf6Yr5HGgA==',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.3.1311 SLBChan/103',
- 'Upgrade': 'websocket',
- 'Cache-Control': 'no-cache',
- 'Connection': 'Upgrade',
- 'Sec-WebSocket-Version': '13',
- 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits',
- "Cookie": self.cookies_str
- }
-
- async def _stream_ask(
- self,
- prompt: str,
- parentId: str = "0",
- sessionId: str = "",
- timeout: int = 60,
- image: bytes = None
- ) -> typing.Generator[TianGongChatResponse, None, None]:
- """流式回复
-
- Args:
- prompt (str): 提问内容
- parentId (str, optional): 父消息id. Defaults to "0".
- sessionId (str, optional): 对话id. Defaults to "".
- timeout (int, optional): 超时时间. Defaults to 60.
- image (bytes, optional): 图片二进制数据. Defaults to None.
- """
- if parentId == "0":
- self.parentId = self.parentId
-
- headers = self.headers.copy()
-
- headers['Accept'] = 'text/event-stream'
-
- data = {
- "agent_id": "016",
- "agent_type": "universal",
- "conversation_id": "6322210d-567b-42bb-983b-6c26f417d2f2",
- "prompt": {
- "action": None,
- "ask_from": "user",
- "ask_id": None,
- "content": prompt,
- "prompt_content": None,
- "template_id": None,
- "action": None,
- "file": None,
- "template": None,
- "copilot": False,
- "bubble_text": None,
- "publish_agent": None,
- "copilot_option": None
- },
- }
- async with websockets.connect(self.api_base, extra_headers=headers) as websocket:
- # 将JSON消息转换为字符串并发送
- json_message_str = json.dumps(data)
- await websocket.send(json_message_str)
-
- while True:
- try:
- message = await websocket.recv()
- data_dict = json.loads(message)
- if message is not None and data_dict['type'] != 101:
- result = TianGongChatResponse(data_dict)
-
- if data_dict['type'] == 2:
- break
- yield result
- except json.JSONDecodeError:
- # 当接收到的消息无法解析为JSON时的处理
- print("Received message cannot be decoded as JSON.")
- break
- except ConnectionClosedOK as e:
- raise e
- break
- except ConnectionClosedError as e:
- raise e
- break
-
- logging.debug("done: {}".format(result))
-
- async def _non_stream_ask(
- self,
- prompt: str,
- parentId: str = "0",
- sessionId: str = "",
- timeout: int = 60,
- image: bytes = None
- ) -> TianGongChatResponse:
- """非流式回复
-
- Args:
- prompt (str): 提问内容
- parentId (str, optional): 父消息id. Defaults to "0".
- sessionId (str, optional): 对话id. Defaults to "".
- timeout (int, optional): 超时时间. Defaults to 60.
- image (bytes, optional): 图片二进制数据. Defaults to None.
- """
-
- result = {
- 'texts': "",
- 'suggestion': "",
- }
- async for message in self._stream_ask(prompt, parentId, sessionId, timeout, image):
- # 更新result字典,对于已存在的键,覆盖其值;对于新键,则添加进字典
- message = message.__dict__
- if 'message' in message and isinstance(message['message'], list) and message['message']:
- if 'text' in message['message'][0]['messages'][0]:
- result['texts'] += str(message['message'][0]['messages'][0]['text'])
- if 'suggestedResponses' in message['message'][0]['messages'][0]:
- result['suggestion'] += str(message['message'][0]['messages'][0]['suggestedResponses'])
- return result
-
- async def ask(
- self,
- prompt: str,
- parentId: str = "0",
- sessionId: str = "",
- timeout: int = 60,
- stream: bool = False,
- image: bytes = None
- ) -> typing.Union[typing.Generator[TianGongChatResponse, None, None], TianGongChatResponse]:
- """提问
-
- Args:
- prompt (str): 提问内容
- parentId (str, optional): 父消息id. Defaults to "0".
- sessionId (str, optional): 对话id. Defaults to "".
- timeout (int, optional): 超时时间. Defaults to 60.
- stream (bool, optional): 是否流式. Defaults to False.
- image (bytes, optional): 图片二进制数据. Defaults to None.
- """
-
- return await (self._non_stream_ask(
- prompt,
- parentId,
- sessionId,
- timeout,
- image
- ))
From 6e2c4b4cec4ead0986ee32ac5f48f6c534b41a86 Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Wed, 10 Jul 2024 01:44:16 +0800
Subject: [PATCH 7/9] Temporarily disable the re_gpt adapter import and mapping
---
free_one_api/impls/app.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index 2745627..fdc6488 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -23,7 +23,7 @@
from .adapter import hugchat
from .adapter import qianwen
from .adapter import tiangong
-from .adapter import re_gpt
+# from .adapter import re_gpt
from . import log
from . import cfg as cfgutil
@@ -209,7 +209,7 @@ async def make_application(config_path: str) -> Application:
"Soulter_hugging-chat-api": hugchat.HuggingChatAdapter,
"xw5xr6_revTongYi": qianwen.QianWenAdapter,
"DrTang": tiangong.tiangong,
- "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
+ # "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
}
for adapter_name in adapter_config_mapping:
From cc191f1446295accdea41994179277be1b6afa44 Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Fri, 12 Jul 2024 02:17:46 +0800
Subject: [PATCH 8/9] =?UTF-8?q?=E6=B7=BB=E5=8A=A0kimi=E9=80=82=E9=85=8D?=
=?UTF-8?q?=E5=99=A8=E6=8E=A5=E5=8F=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
free_one_api/impls/adapter/kimi.py | 117 +++++++++++++++++++++++++
free_one_api/impls/adapter/tiangong.py | 2 +-
free_one_api/impls/app.py | 2 +
requirements.txt | 3 +-
4 files changed, 122 insertions(+), 2 deletions(-)
create mode 100644 free_one_api/impls/adapter/kimi.py
diff --git a/free_one_api/impls/adapter/kimi.py b/free_one_api/impls/adapter/kimi.py
new file mode 100644
index 0000000..f620681
--- /dev/null
+++ b/free_one_api/impls/adapter/kimi.py
@@ -0,0 +1,117 @@
+import asyncio
+import typing
+import traceback
+import uuid
+import random
+
+import revkimi.kimichat as kimi
+
+from free_one_api.entities import request, response
+
+from ...models import adapter
+from ...models.adapter import llm
+from ...entities import request, response, exceptions
+from ...models.channel import evaluation
+
+
+@adapter.llm_adapter
+class KimiAdapter(llm.LLMLibAdapter):
+
+ @classmethod
+ def name(cls) -> str:
+ return "DrTang/revKimi"
+
+ @classmethod
+ def description(self) -> str:
+ return "suck my pussy"
+
+ def supported_models(self) -> list[str]:
+ return [
+ "gpt-3.5-turbo",
+ "gpt-4",
+ "gpt-4-1106-preview",
+ "gpt-4-vision-preview",
+ "gpt-4",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-32k",
+ "gpt-4-32k-0314",
+ "gpt-4-32k-0613",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ ]
+
+ def function_call_supported(self) -> bool:
+ return False
+
+ def stream_mode_supported(self) -> bool:
+ return True
+
+ def multi_round_supported(self) -> bool:
+ return True
+
+ @classmethod
+ def config_comment(cls) -> str:
+ return \
+ """
+ You should provide cookie string as `cookie` in config:
+ {
+ "cookie": "your cookie string"
+ }
+
+ """
+
+ @classmethod
+ def supported_path(cls) -> str:
+ return "/v1/chat/completions"
+
+ chatbot: kimi.Chatbot
+
+ def __init__(self, config: dict, eval: evaluation.AbsChannelEvaluation):
+ self.config = config
+ self.eval = eval
+ self.chatbot = kimi.Chatbot(
+ cookies_str=config['cookie']
+ )
+
+ async def test(self) -> typing.Union[bool, str]:
+ try:
+ resp =self.chatbot.ask(
+ prompt="Hello, reply 'hi' only.",
+ conversation_id="", # 会话ID(不填则会新建)
+                timeout=10,  # 超时时间(默认10秒)
+ use_search=False # 是否使用搜索
+ )
+
+ return True, ""
+ except Exception as e:
+ traceback.print_exc()
+ return False, str(e)
+
+ async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
+ prompt = ""
+
+ for msg in req.messages:
+ prompt += f"{msg['role']}: {msg['content']}\n"
+
+ prompt += "assistant: "
+
+ random_int = random.randint(0, 1000000000)
+
+ resp =self.chatbot.ask(
+ prompt=prompt,
+ conversation_id="", # 会话ID(不填则会新建)
+            timeout=10,  # 超时时间(默认10秒)
+ use_search=True # 是否使用搜索
+ )
+
+ yield response.Response(
+ id=random_int,
+ finish_reason=response.FinishReason.NULL,
+ normal_message=resp['text'],
+ function_call=None
+ )
diff --git a/free_one_api/impls/adapter/tiangong.py b/free_one_api/impls/adapter/tiangong.py
index c6ea5a6..b7b06de 100644
--- a/free_one_api/impls/adapter/tiangong.py
+++ b/free_one_api/impls/adapter/tiangong.py
@@ -15,7 +15,7 @@
@adapter.llm_adapter
-class QianWenAdapter(llm.LLMLibAdapter):
+class TianGongAdapter(llm.LLMLibAdapter):
@classmethod
def name(cls) -> str:
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index fdc6488..b4411a0 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -23,6 +23,7 @@
from .adapter import hugchat
from .adapter import qianwen
from .adapter import tiangong
+from .adapter import kimi
# from .adapter import re_gpt
from . import log
@@ -209,6 +210,7 @@ async def make_application(config_path: str) -> Application:
"Soulter_hugging-chat-api": hugchat.HuggingChatAdapter,
"xw5xr6_revTongYi": qianwen.QianWenAdapter,
"DrTang": tiangong.tiangong,
+ "DrTang": kimi.KimiAdapter,
# "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
}
diff --git a/requirements.txt b/requirements.txt
index 2134d28..2a2473b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,4 +10,5 @@ g4f
revTongYi
colorlog
git+https://github.com/Zai-Kun/reverse-engineered-chatgpt
-git+https://github.com/dd123-a/revTiangong.git
\ No newline at end of file
+git+https://github.com/dd123-a/revTiangong.git
+git+https://github.com/dd123-a/revkimi.git
\ No newline at end of file
From 1b7b8a968bec1e02b91bb9278c1064bfb93e343f Mon Sep 17 00:00:00 2001
From: wenqian <522361349@qq.com>
Date: Fri, 12 Jul 2024 16:45:02 +0800
Subject: [PATCH 9/9] =?UTF-8?q?=E8=BF=98=E5=8E=9Fre=5Fgpt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
free_one_api/impls/app.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/free_one_api/impls/app.py b/free_one_api/impls/app.py
index b4411a0..ff7c05c 100644
--- a/free_one_api/impls/app.py
+++ b/free_one_api/impls/app.py
@@ -24,7 +24,7 @@
from .adapter import qianwen
from .adapter import tiangong
from .adapter import kimi
-# from .adapter import re_gpt
+from .adapter import re_gpt
from . import log
from . import cfg as cfgutil
@@ -211,7 +211,7 @@ async def make_application(config_path: str) -> Application:
"xw5xr6_revTongYi": qianwen.QianWenAdapter,
"DrTang": tiangong.tiangong,
"DrTang": kimi.KimiAdapter,
- # "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
+ "Zai-Kun_reverse-engineered-chatgpt": re_gpt.ReGPTAdapter,
}
for adapter_name in adapter_config_mapping: