author | Heiner Lohaus <heiner@lohaus.eu> | 2023-09-18 03:21:12 +0200 |
---|---|---|
committer | Heiner Lohaus <heiner@lohaus.eu> | 2023-09-18 03:21:12 +0200 |
commit | e8d7bcd0458b508fd56643c1efd0a294173ea26b (patch) | |
tree | 6e1ab7be931c2c5cf6474aab9204df2b5eca47c0 /g4f | |
parent | ~ (diff) | |
Diffstat (limited to 'g4f')
-rw-r--r-- | g4f/Provider/AItianhu.py | 59 |
-rw-r--r-- | g4f/Provider/GptGo.py | 78 |
-rw-r--r-- | g4f/Provider/__init__.py | 2 |
3 files changed, 98 insertions, 41 deletions
```diff
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 2e129896..0f01e536 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -1,61 +1,38 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession, http
+from curl_cffi.requests import AsyncSession
 
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncProvider, format_prompt
 
 
-class AItianhu(AsyncGeneratorProvider):
+class AItianhu(AsyncProvider):
     url = "https://www.aitianhu.com"
     working = True
     supports_gpt_35_turbo = True
 
     @classmethod
-    async def create_async_generator(
+    async def create_async(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Referer": cls.url + "/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
+    ) -> str:
+        data = {
+            "prompt": format_prompt(messages),
+            "options": {},
+            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            "temperature": 0.8,
+            "top_p": 1,
+            **kwargs
         }
-        async with ClientSession(
-            headers=headers,
-            version=http.HttpVersion10
-        ) as session:
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            async with session.post(
-                cls.url + "/api/chat-process",
-                proxy=proxy,
-                json=data,
-                ssl=False,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    line = json.loads(line.decode('utf-8'))
-                    token = line["detail"]["choices"][0]["delta"].get("content")
-                    if token:
-                        yield token
+        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+            response = await session.post(cls.url + "/api/chat-process", json=data)
+            response.raise_for_status()
+            line = response.text.splitlines()[-1]
+            line = json.loads(line)
+            return line["text"]
 
     @classmethod
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
new file mode 100644
index 00000000..7db8fb0d
--- /dev/null
+++ b/g4f/Provider/GptGo.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class GptGo(AsyncGeneratorProvider):
+    url = "https://gptgo.ai"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin" : cls.url,
+            "Referer" : cls.url + "/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            async with session.get(
+                "https://gptgo.ai/action_get_token.php",
+                params={
+                    "q": format_prompt(messages),
+                    "hlgpt": "default",
+                    "hl": "en"
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                token = (await response.json(content_type=None))["token"]
+
+            async with session.get(
+                "https://gptgo.ai/action_ai_gpt.php",
+                params={
+                    "token": token,
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
+                        line = json.loads(line[len(start):-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 2a3d820e..c36782b4 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -16,6 +16,7 @@ from .DfeHub import DfeHub
 from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
+from .GptGo import GptGo
 from .H2o import H2o
 from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
@@ -57,6 +58,7 @@ __all__ = [
     'EasyChat',
     'Forefront',
     'GetGpt',
+    'GptGo',
     'H2o',
     'HuggingChat',
     'Liaobots',
```
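For orientation, here is a minimal usage sketch of the two providers touched by this commit. It relies only on the signatures visible in the diff: `AItianhu.create_async(...)` now returns a complete string, while `GptGo.create_async_generator(...)` yields tokens. It assumes `g4f` is importable and that `AItianhu` is already exported from `g4f.Provider` (only the `GptGo` export is added here); the model name and prompt are illustrative.

```python
import asyncio

# Assumed imports: g4f is installed, AItianhu was already exported,
# GptGo is exported by the __init__.py change in this commit.
from g4f.Provider import AItianhu, GptGo

messages = [{"role": "user", "content": "Say hello in one sentence."}]

async def main():
    # AItianhu now returns the whole completion at once (create_async -> str).
    answer = await AItianhu.create_async(model="gpt-3.5-turbo", messages=messages)
    print(answer)

    # GptGo streams tokens through an async generator (create_async_generator).
    async for token in GptGo.create_async_generator(model="gpt-3.5-turbo", messages=messages):
        print(token, end="", flush=True)
    print()

asyncio.run(main())
```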