author     kqlio67 <kqlio67@users.noreply.github.com>  2024-09-24 12:23:53 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-09-24 12:23:53 +0200
commit     f8e403a745c5caff31d7edb854dcba40eba3166d (patch)
tree       a75c6030a8054c56201fa2d41306a51b9052545c /g4f/Provider/deprecated/AiAsk.py
parent     Added gpt-4o provider (diff)
Diffstat (limited to 'g4f/Provider/deprecated/AiAsk.py')
-rw-r--r--  g4f/Provider/deprecated/AiAsk.py  46
1 file changed, 0 insertions, 46 deletions
diff --git a/g4f/Provider/deprecated/AiAsk.py b/g4f/Provider/deprecated/AiAsk.py
deleted file mode 100644
index 6ea5f3e0..00000000
--- a/g4f/Provider/deprecated/AiAsk.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-
-class AiAsk(AsyncGeneratorProvider):
-    url = "https://e.aiask.me"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-    working = False
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "application/json, text/plain, */*",
-            "origin": cls.url,
-            "referer": f"{cls.url}/chat",
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "continuous": True,
-                "id": "fRMSQtuHl91A4De9cCvKD",
-                "list": messages,
-                "models": "0",
-                "prompt": "",
-                "temperature": kwargs.get("temperature", 0.5),
-                "title": "",
-            }
-            buffer = ""
-            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
-            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content.iter_any():
-                    buffer += chunk.decode()
-                    if not rate_limit.startswith(buffer):
-                        yield buffer
-                        buffer = ""
-                    elif buffer == rate_limit:
-                        raise RuntimeError("Rate limit reached")
\ No newline at end of file
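
For reference, the heart of the deleted provider is its streaming loop: chunks are buffered and withheld while the accumulated text is still a possible prefix of the server's rate-limit notice (the Chinese rate_limit string, roughly: "Your free quota is not enough to use this model; please click Log in at the top right to continue!"). A minimal standalone sketch of that buffering pattern, with a hypothetical chunks async iterator of decoded strings standing in for response.content:

    RATE_LIMIT = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"

    async def filter_rate_limit(chunks):
        # Withhold output while it could still grow into the rate-limit notice.
        buffer = ""
        async for chunk in chunks:
            buffer += chunk
            if not RATE_LIMIT.startswith(buffer):
                # Buffer can no longer be a prefix of the notice: safe to emit.
                yield buffer
                buffer = ""
            elif buffer == RATE_LIMIT:
                raise RuntimeError("Rate limit reached")
        # Like the original, a final chunk that is a strict prefix of the
        # notice is silently dropped when the stream ends.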