-rw-r--r--  g4f/Provider/AI365VIP.py        67
-rw-r--r--  g4f/Provider/Blackbox.py        37
-rw-r--r--  g4f/Provider/Chatgpt4o.py       83
-rw-r--r--  g4f/Provider/ChatgptFree.py      2
-rw-r--r--  g4f/Provider/DDG.py            117
-rw-r--r--  g4f/Provider/Feedough.py        78
-rw-r--r--  g4f/Provider/FreeChatgpt.py     97
-rw-r--r--  g4f/Provider/FreeGpt.py         66
-rw-r--r--  g4f/Provider/GeminiProChat.py    9
-rw-r--r--  g4f/Provider/HuggingChat.py      6
-rw-r--r--  g4f/Provider/HuggingFace.py     12
-rw-r--r--  g4f/Provider/Koala.py           36
-rw-r--r--  g4f/Provider/Liaobots.py       168
-rw-r--r--  g4f/Provider/MetaAI.py           7
-rw-r--r--  g4f/Provider/ReplicateHome.py  136
-rw-r--r--  g4f/Provider/__init__.py        16
16 files changed, 785 insertions, 152 deletions
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
new file mode 100644
index 00000000..fc6ad237
--- /dev/null
+++ b/g4f/Provider/AI365VIP.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4o',
+ 'claude-3-haiku-20240307',
+ ]
+ model_aliases = {
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://chat.ai365vip.com",
+ "priority": "u=1, i",
+ "referer": "https://chat.ai365vip.com/en",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": {
+ "id": model,
+ "name": {
+ "gpt-3.5-turbo": "GPT-3.5",
+ "claude-3-haiku-20240307": "claude-3-haiku",
+ "gpt-4o": "GPT-4O"
+ }.get(model, model),
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "prompt": "You are a helpful assistant.",
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
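
Note: AI365VIP follows the project's standard async-generator provider interface, so it is reachable through the usual entry point. A minimal usage sketch, assuming the g4f.ChatCompletion API of this release (model and prompt are illustrative):

    import g4f
    from g4f.Provider import AI365VIP

    # Streamed completion routed through the new provider.
    for chunk in g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=AI365VIP,
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
    ):
        print(chunk, end="")
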
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 6e1e3949..a86471f2 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -2,31 +2,35 @@ from __future__ import annotations
import uuid
import secrets
-from aiohttp import ClientSession
+import re
+from aiohttp import ClientSession, ClientResponse
+from typing import AsyncGenerator, Optional
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class Blackbox(AsyncGeneratorProvider):
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.blackbox.ai"
working = True
+ default_model = 'blackbox'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
- image: ImageType = None,
- image_name: str = None,
+ proxy: Optional[str] = None,
+ image: Optional[ImageType] = None,
+ image_name: Optional[str] = None,
**kwargs
- ) -> AsyncResult:
+ ) -> AsyncGenerator[str, None]:
if image is not None:
messages[-1]["data"] = {
- "fileText": image_name,
+ "fileText": image_name,
"imageBase64": to_data_uri(image)
}
+
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
@@ -40,9 +44,11 @@ class Blackbox(AsyncGeneratorProvider):
"Alt-Used": "www.blackbox.ai",
"Connection": "keep-alive",
}
+
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
+
data = {
"messages": messages,
"id": random_id,
@@ -55,10 +61,17 @@ class Blackbox(AsyncGeneratorProvider):
"playgroundMode": False,
"webSearchMode": False,
"userSystemPrompt": "",
- "githubToken": None
+ "githubToken": None,
+ "maxTokens": None
}
- async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+
+ async with session.post(
+ f"{cls.url}/api/chat", json=data, proxy=proxy
+ ) as response: # type: ClientResponse
response.raise_for_status()
- async for chunk in response.content:
+ async for chunk in response.content.iter_any():
if chunk:
- yield chunk.decode()
+ # Decode the chunk and clean up unwanted prefixes using a regex
+ decoded_chunk = chunk.decode()
+ cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
+ yield cleaned_chunk
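
Note: the new regex exists because Blackbox prepends "$@$...$@$" metadata markers to its stream. A standalone sketch of what the cleanup removes (the marker content is illustrative):

    import re

    # The non-greedy alternative strips a full "$@$...$@$" token; the bare
    # "$@$" alternative drops a marker split across a chunk boundary.
    chunk = "$@$v=undefined-rv1$@$Hello, world"
    print(re.sub(r'\$@\$.+?\$@\$|\$@\$', '', chunk))  # -> Hello, world
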
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/Chatgpt4o.py
new file mode 100644
index 00000000..f3dc8a15
--- /dev/null
+++ b/g4f/Provider/Chatgpt4o.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import re
+from ..requests import StreamSession, raise_for_status
+from ..typing import Messages
+from .base_provider import AsyncProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ supports_gpt_4 = True
+ working = True
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o'
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
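
Note: the provider automates the stock WordPress "wpaicg" chat plugin: scrape data-post-id and data-nonce from the landing page, then POST them to admin-ajax.php. A condensed synchronous sketch of the same flow (requests-based, for illustration only; the provider itself impersonates Chrome via StreamSession, which plain requests does not):

    import re
    import requests

    base = "https://chatgpt4o.one"
    html = requests.get(f"{base}/").text
    post_id = re.search(r'data-post-id="([0-9]+)"', html).group(1)
    nonce = re.search(r'data-nonce="(.*?)"', html).group(1)

    reply = requests.post(f"{base}/wp-admin/admin-ajax.php", data={
        "_wpnonce": nonce,
        "post_id": post_id,
        "url": base,
        "action": "wpaicg_chat_shortcode_message",
        "message": "Hello",
        "bot_id": "0",
    }).json()
    print(reply["data"])
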
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 22064f1b..b1e00a22 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
class ChatgptFree(AsyncProvider):
url = "https://chatgptfree.ai"
supports_gpt_35_turbo = True
- working = False
+ working = True
_post_id = None
_nonce = None
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
new file mode 100644
index 00000000..2aa78773
--- /dev/null
+++ b/g4f/Provider/DDG.py
@@ -0,0 +1,117 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+import asyncio
+from typing import Optional
+import base64
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_connector
+from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
+from ..providers.conversation import BaseConversation
+
+class DDG(AsyncGeneratorProvider, ProviderModelMixin):
+ url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8")
+ working = True
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+
+ default_model = "gpt-3.5-turbo-0125"
+ models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307", "meta-llama/Llama-3-70b-chat-hf", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+ "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ }
+
+ # Obfuscated URLs and headers
+ status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8")
+ chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8")
+ referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8")
+ origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8")
+
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:127.0) Gecko/20100101 Firefox/127.0'
+ headers = {
+ 'User-Agent': user_agent,
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
+ 'Referer': referer,
+ 'Content-Type': 'application/json',
+ 'Origin': origin,
+ 'Connection': 'keep-alive',
+ 'Cookie': 'dcm=3',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Pragma': 'no-cache',
+ 'TE': 'trailers'
+ }
+
+ @classmethod
+ async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]:
+ try:
+ async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
+ await raise_for_status(response)
+ return response.headers.get("x-vqd-4")
+ except Exception as e:
+ print(f"Error getting VQD: {e}")
+ return None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: aiohttp.BaseConnector = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session:
+ vqd_4 = None
+ if conversation is not None and len(messages) > 1:
+ vqd_4 = conversation.vqd_4
+ messages = [*conversation.messages, messages[-2], messages[-1]]
+ else:
+ for _ in range(3): # Try up to 3 times to get a valid VQD
+ vqd_4 = await cls.get_vqd(session)
+ if vqd_4:
+ break
+ await asyncio.sleep(1) # Wait a bit before retrying
+
+ if not vqd_4:
+ raise Exception("Failed to obtain a valid VQD token")
+
+ messages = [messages[-1]] # Only use the last message for new conversations
+
+ payload = {
+ 'model': cls.get_model(model),
+ 'messages': [{'role': m['role'], 'content': m['content']} for m in messages]
+ }
+
+ async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
+ await raise_for_status(response)
+ if return_conversation:
+ yield Conversation(vqd_4, messages)
+
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk.startswith(b"[DONE]"):
+ break
+ try:
+ data = json.loads(chunk)
+ if "message" in data and data["message"]:
+ yield data["message"]
+ except json.JSONDecodeError:
+ print(f"Failed to decode JSON: {chunk}")
+
+class Conversation(BaseConversation):
+ def __init__(self, vqd_4: str, messages: Messages) -> None:
+ self.vqd_4 = vqd_4
+ self.messages = messages
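
Note: the only non-obvious part of the DDG flow is the VQD handshake: a token is requested from the status endpoint with an "x-vqd-accept: 1" header and echoed back as "x-vqd-4" on the chat POST. A condensed sketch of that exchange (headers trimmed; the provider sends the fuller set above):

    import asyncio
    import aiohttp

    async def ddg_chat(prompt: str) -> None:
        async with aiohttp.ClientSession() as session:
            # Step 1: obtain a VQD token from the status endpoint.
            async with session.get(
                "https://duckduckgo.com/duckchat/v1/status",
                headers={"x-vqd-accept": "1"},
            ) as resp:
                vqd = resp.headers.get("x-vqd-4")
            # Step 2: echo it back on the chat request and read the SSE stream.
            async with session.post(
                "https://duckduckgo.com/duckchat/v1/chat",
                headers={"x-vqd-4": vqd},
                json={"model": "gpt-3.5-turbo-0125",
                      "messages": [{"role": "user", "content": prompt}]},
            ) as resp:
                async for line in resp.content:
                    if line.startswith(b"data: ") and b"[DONE]" not in line:
                        print(line[6:].decode().strip())

    asyncio.run(ddg_chat("Hello"))
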
diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
new file mode 100644
index 00000000..d35e30ee
--- /dev/null
+++ b/g4f/Provider/Feedough.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
+ working = True
+ default_model = ''
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
+ data = {
+ "action": "aixg_generate",
+ "prompt": format_prompt(messages),
+ "aixg_generate_nonce": "110c021031"
+ }
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+ @classmethod
+ async def run(cls, *args, **kwargs):
+ async for item in cls.create_async_generator(*args, **kwargs):
+ yield item
+
+        current = asyncio.current_task()
+        for task in asyncio.all_tasks():
+            if task is not current and not task.done():  # don't await ourselves: deadlock
+                await task
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 3fb247c7..7d8c1d10 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -1,17 +1,27 @@
from __future__ import annotations
-
import json
-from aiohttp import ClientSession, ClientTimeout
-
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
+from .helper import format_prompt
+
class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://free.chatgpt.org.uk"
+ url = "https://chat.chatgpt.org.uk"
+ api_endpoint = "/api/openai/v1/chat/completions"
working = True
- supports_message_history = True
- default_model = "google-gemini-pro"
+ supports_gpt_35_turbo = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'SparkDesk-v1.1',
+ 'deepseek-coder',
+ 'deepseek-chat',
+ 'Qwen2-7B-Instruct',
+ 'glm4-9B-chat',
+ 'chatglm3-6B',
+ 'Yi-1.5-9B-Chat',
+ ]
@classmethod
async def create_async_generator(
@@ -19,45 +29,50 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- timeout: int = 120,
**kwargs
) -> AsyncResult:
headers = {
- "Accept": "application/json, text/event-stream",
- "Content-Type":"application/json",
- "Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en-US,en;q=0.5",
- "Host":"free.chatgpt.org.uk",
- "Referer":f"{cls.url}/",
- "Origin":f"{cls.url}",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
- async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "messages": messages,
+ "messages": [
+ {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
+ {"role": "user", "content": prompt}
+ ],
"stream": True,
- "model": cls.get_model(""),
- "temperature": kwargs.get("temperature", 0.5),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "top_p": kwargs.get("top_p", 1)
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
}
- async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
- await raise_for_status(response)
- started = False
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ accumulated_text = ""
async for line in response.content:
- if line.startswith(b"data: [DONE]"):
- break
- elif line.startswith(b"data: "):
- line = json.loads(line[6:])
- if(line["choices"]==[]):
- continue
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- started = True
- yield chunk
- if not started:
-                raise RuntimeError("Empty response")
\ No newline at end of file
+ if line:
+ line_str = line.decode().strip()
+ if line_str == "data: [DONE]":
+ yield accumulated_text
+ break
+ elif line_str.startswith("data: "):
+ try:
+ chunk = json.loads(line_str[6:])
+ delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
+ accumulated_text += delta_content
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index f79f0a66..7fa3b5ab 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -1,55 +1,67 @@
from __future__ import annotations
-import time, hashlib, random
-
-from ..typing import AsyncResult, Messages
+import time
+import hashlib
+import random
+from typing import AsyncGenerator, Optional, Dict, Any
+from ..typing import Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider
from ..errors import RateLimitError
-domains = [
+# Constants
+DOMAINS = [
"https://s.aifree.site",
"https://v.aifree.site/"
]
+RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
+
class FreeGpt(AsyncGeneratorProvider):
- url = "https://freegptsnav.aifree.site"
- working = True
- supports_message_history = True
- supports_system_message = True
- supports_gpt_35_turbo = True
+ url: str = "https://freegptsnav.aifree.site"
+ working: bool = True
+ supports_message_history: bool = True
+ supports_system_message: bool = True
+ supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
+ proxy: Optional[str] = None,
timeout: int = 120,
- **kwargs
- ) -> AsyncResult:
+ **kwargs: Any
+ ) -> AsyncGenerator[str, None]:
+ prompt = messages[-1]["content"]
+ timestamp = int(time.time())
+ data = cls._build_request_data(messages, prompt, timestamp)
+
+ domain = random.choice(DOMAINS)
+
async with StreamSession(
impersonate="chrome",
timeout=timeout,
- proxies={"all": proxy}
+ proxies={"all": proxy} if proxy else None
) as session:
- prompt = messages[-1]["content"]
- timestamp = int(time.time())
- data = {
- "messages": messages,
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, prompt)
- }
- domain = random.choice(domains)
async with session.post(f"{domain}/api/generate", json=data) as response:
await raise_for_status(response)
async for chunk in response.iter_content():
- chunk = chunk.decode(errors="ignore")
- if chunk == "当前地区当日额度已消耗完":
+ chunk_decoded = chunk.decode(errors="ignore")
+ if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
raise RateLimitError("Rate limit reached")
- yield chunk
-
-def generate_signature(timestamp: int, message: str, secret: str = ""):
+ yield chunk_decoded
+
+ @staticmethod
+ def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
+ return {
+ "messages": messages,
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, prompt, secret)
+ }
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
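
Note: generate_signature is a plain SHA-256 over "timestamp:message:secret", with the secret defaulting to the empty string. A worked example of the "sign" field the request carries:

    import hashlib
    import time

    timestamp = int(time.time())
    prompt = "Hello"
    # With the default empty secret, the signed string is "<timestamp>:Hello:".
    sign = hashlib.sha256(f"{timestamp}:{prompt}:".encode()).hexdigest()
    print(sign)  # 64-character hex digest
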
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index d88c4ed0..c61e2ff3 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -9,13 +9,14 @@ from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class GeminiProChat(AsyncGeneratorProvider):
+class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
+ default_model = ''
@classmethod
async def create_async_generator(
@@ -32,8 +33,8 @@ class GeminiProChat(AsyncGeneratorProvider):
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
- "Referer": "https://gemini-chatbot-sigma.vercel.app/",
- "Origin": "https://gemini-chatbot-sigma.vercel.app",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 00d49b82..d480d13c 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,15 +13,13 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
- "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
'CohereForAI/c4ai-command-r-plus',
+ 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'google/gemma-1.1-7b-it',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'microsoft/Phi-3-mini-4k-instruct',
- '01-ai/Yi-1.5-34B-Chat'
]
model_aliases = {
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 6a05c26e..a5e27ccf 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -12,10 +12,16 @@ from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
+ needs_auth = True
supports_message_history = True
models = [
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "mistralai/Mistral-7B-Instruct-v0.2"
+ 'CohereForAI/c4ai-command-r-plus',
+ 'meta-llama/Meta-Llama-3-70B-Instruct',
+ 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
+ 'mistralai/Mistral-7B-Instruct-v0.2',
+ 'microsoft/Phi-3-mini-4k-instruct',
]
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@@ -74,4 +80,4 @@ def format_prompt(messages: Messages) -> str:
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])
- return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
+ return f"{history}<s>[INST] {question} [/INST]"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 849bcdbe..c708bcb9 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -1,7 +1,8 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession, BaseConnector
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -19,12 +20,13 @@ class Koala(AsyncGeneratorProvider):
cls,
model: str,
messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs
- ) -> AsyncResult:
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
model = "gpt-3.5-turbo"
+
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
@@ -40,13 +42,17 @@ class Koala(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
}
+
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
- input = messages[-1]["content"]
- system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
if system_messages:
- input += " ".join(system_messages)
+ input_text += f" {system_messages}"
+
data = {
- "input": input,
+ "input": input_text,
"inputHistory": [
message["content"]
for message in messages[:-1]
@@ -59,8 +65,14 @@ class Koala(AsyncGeneratorProvider):
],
"model": model,
}
+
async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
await raise_for_status(response)
- async for chunk in response.content:
- if chunk.startswith(b"data: "):
-                    yield json.loads(chunk[6:])
\ No newline at end of file
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 75ecf300..277d8ea2 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,7 +10,16 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-4o": {
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 48000,
+ "tokenLimit": 14000,
+ "context": "16K",
+ },
+ "gpt-4o-free": {
"context": "8K",
"id": "gpt-4o-free",
"maxLength": 31200,
@@ -19,51 +28,74 @@ models = {
"provider": "OpenAI",
"tokenLimit": 7800,
},
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
- },
- "gpt-4-turbo": {
- "id": "gpt-4-turbo-preview",
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4": {
- "id": "gpt-4-plus",
- "name": "GPT-4-Plus",
- "maxLength": 130000,
- "tokenLimit": 31000,
- "context": "32K",
+ "gpt-4o": {
+ "context": "128K",
+ "id": "gpt-4o",
+ "maxLength": 124000,
+ "model": "ChatGPT",
+ "name": "GPT-4o",
+ "provider": "OpenAI",
+ "tokenLimit": 62000,
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4-0613",
- "maxLength": 60000,
- "tokenLimit": 15000,
- "context": "16K",
- },
- "gemini-pro": {
- "id": "gemini-pro",
- "name": "Gemini-Pro",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 32000,
+ "tokenLimit": 7600,
+ "context": "8K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-opus-20240229-aws": {
+ "id": "claude-3-opus-20240229-aws",
+ "name": "Claude-3-Opus-Aws",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
+ "claude-3-opus-100k-poe": {
+ "id": "claude-3-opus-100k-poe",
+ "name": "Claude-3-Opus-100k-Poe",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 400000,
+ "tokenLimit": 99000,
+ "context": "100K",
+ },
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-haiku-20240307": {
+ "id": "claude-3-haiku-20240307",
+ "name": "Claude-3-Haiku",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -71,6 +103,8 @@ models = {
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -78,16 +112,38 @@ models = {
"claude-2.0": {
"id": "claude-2.0",
"name": "Claude-2.0-100k",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
},
- "claude-instant-1": {
- "id": "claude-instant-1",
- "name": "Claude-instant-1",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
+ "gemini-1.0-pro-latest": {
+ "id": "gemini-1.0-pro-latest",
+ "name": "Gemini-Pro",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 120000,
+ "tokenLimit": 30000,
+ "context": "32K",
+ },
+ "gemini-1.5-flash-latest": {
+ "id": "gemini-1.5-flash-latest",
+ "name": "Gemini-1.5-Flash-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
+ },
+ "gemini-1.5-pro-latest": {
+ "id": "gemini-1.5-pro-latest",
+ "name": "Gemini-1.5-Pro-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
}
}
@@ -100,9 +156,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
- models = list(models)
+ models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2"
+ "claude-v2": "claude-2.0"
}
_auth_code = ""
_cookie_jar = None
@@ -131,7 +187,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
data = {
"conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
+ "model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -189,3 +245,45 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ """
+ Retrieve the internal model identifier based on the provided model name or alias.
+ """
+ if model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ if model not in models:
+ raise ValueError(f"Model '{model}' is not supported.")
+ return model
+
+ @classmethod
+ def is_supported(cls, model: str) -> bool:
+ """
+ Check if the given model is supported.
+ """
+ return model in models or model in cls.model_aliases
+
+ @classmethod
+ async def initialize_auth_code(cls, session: ClientSession) -> None:
+ """
+ Initialize the auth code by making the necessary login requests.
+ """
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
+
+ @classmethod
+ async def ensure_auth_code(cls, session: ClientSession) -> None:
+ """
+ Ensure the auth code is initialized, and if not, perform the initialization.
+ """
+ if not cls._auth_code:
+ await cls.initialize_auth_code(session)
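
Note: the new initialize_auth_code/ensure_auth_code helpers cache a login token across calls: one POST to /api/user with a seed authcode yields the real authCode, which later chat requests reuse together with the session's cookie jar. A condensed sketch of that exchange (aiohttp; the empty seed is an assumption, the diff ships a hardcoded one):

    import asyncio
    import aiohttp

    async def fetch_auth_code() -> str:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                "https://liaobots.work/api/user",
                json={"authcode": ""},  # seed value; the provider hardcodes its own
                ssl=False,
            ) as resp:
                return (await resp.json(content_type=None))["authCode"]

    print(asyncio.run(fetch_auth_code()))
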
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/MetaAI.py
index caed7778..f1ef348a 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/MetaAI.py
@@ -12,7 +12,7 @@ from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse, ImagePreview
from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector, format_cookies
class Sources():
@@ -25,10 +25,11 @@ class Sources():
class AbraGeoBlockedError(Exception):
pass
-class MetaAI(AsyncGeneratorProvider):
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Meta AI"
url = "https://www.meta.ai"
working = True
+ default_model = ''
def __init__(self, proxy: str = None, connector: BaseConnector = None):
self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
@@ -234,4 +235,4 @@ def generate_offline_threading_id() -> str:
# Combine timestamp and random value
threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
-    return str(threading_id)
\ No newline at end of file
+ return str(threading_id)
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
new file mode 100644
index 00000000..48336831
--- /dev/null
+++ b/g4f/Provider/ReplicateHome.py
@@ -0,0 +1,136 @@
+from __future__ import annotations
+from typing import AsyncGenerator, Optional, Dict, Any, Union, List
+import random
+import asyncio
+import base64
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from ..requests import StreamSession, raise_for_status
+from ..errors import ResponseError
+from ..image import ImageResponse
+
+class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://replicate.com"
+ parent = "Replicate"
+ working = True
+ default_model = 'stability-ai/sdxl'
+ models = [
+ # image
+ 'stability-ai/sdxl',
+ 'ai-forever/kandinsky-2.2',
+
+ # text
+ 'meta/llama-2-70b-chat',
+ 'mistralai/mistral-7b-instruct-v0.2'
+ ]
+
+ versions = {
+ # image
+ 'stability-ai/sdxl': [
+ "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
+ "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
+ "7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
+ ],
+ 'ai-forever/kandinsky-2.2': [
+ "ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
+ ],
+
+
+ # Text
+ 'meta/llama-2-70b-chat': [
+ "dp-542693885b1777c98ef8c5a98f2005e7"
+ ],
+ 'mistralai/mistral-7b-instruct-v0.2': [
+ "dp-89e00f489d498885048e94f9809fbc76"
+ ]
+ }
+
+ image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
+ text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ **kwargs: Any
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+ yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+
+ @classmethod
+ async def create_async(
+ cls,
+ prompt: str,
+ model: str,
+ api_key: Optional[str] = None,
+ proxy: Optional[str] = None,
+ timeout: int = 180,
+ version: Optional[str] = None,
+ extra_data: Dict[str, Any] = {},
+ **kwargs: Any
+ ) -> Union[str, ImageResponse]:
+ headers = {
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept-Language': 'en-US',
+ 'Connection': 'keep-alive',
+ 'Origin': cls.url,
+ 'Referer': f'{cls.url}/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ }
+
+ if version is None:
+ version = random.choice(cls.versions.get(model, []))
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+
+ async with StreamSession(
+ proxies={"all": proxy},
+ headers=headers,
+ timeout=timeout
+ ) as session:
+ data = {
+ "input": {
+ "prompt": prompt,
+ **extra_data
+ },
+ "version": version
+ }
+ if api_key is None:
+ data["model"] = cls.get_model(model)
+ url = "https://homepage.replicate.com/api/prediction"
+ else:
+ url = "https://api.replicate.com/v1/predictions"
+ async with session.post(url, json=data) as response:
+ await raise_for_status(response)
+ result = await response.json()
+ if "id" not in result:
+ raise ResponseError(f"Invalid response: {result}")
+
+ while True:
+ if api_key is None:
+ url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
+ else:
+ url = f"https://api.replicate.com/v1/predictions/{result['id']}"
+ async with session.get(url) as response:
+ await raise_for_status(response)
+ result = await response.json()
+ if "status" not in result:
+ raise ResponseError(f"Invalid response: {result}")
+ if result["status"] == "succeeded":
+ output = result['output']
+ if model in cls.text_models:
+ return ''.join(output) if isinstance(output, list) else output
+ elif model in cls.image_models:
+ images: List[Any] = output
+ images = images[0] if len(images) == 1 else images
+ return ImageResponse(images, prompt)
+ elif result["status"] == "failed":
+ raise ResponseError(f"Prediction failed: {result}")
+ await asyncio.sleep(0.5)
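
Note: ReplicateHome follows Replicate's usual submit-then-poll pattern, using the anonymous homepage endpoints when no API key is given. A trimmed synchronous sketch of that loop (requests-based; model and version taken from the tables above):

    import time
    import requests

    created = requests.post(
        "https://homepage.replicate.com/api/prediction",
        json={
            "model": "stability-ai/sdxl",
            "version": "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
            "input": {"prompt": "a lighthouse at dusk"},
        },
    ).json()

    while True:
        result = requests.get(
            f"https://homepage.replicate.com/api/poll?id={created['id']}"
        ).json()
        if result["status"] == "succeeded":
            # Image models return a list of URLs, text models a list of chunks.
            print(result["output"])
            break
        if result["status"] == "failed":
            raise RuntimeError(result)
        time.sleep(0.5)
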
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index dab6f5d5..56c01150 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,28 +10,24 @@ from .not_working import *
from .selenium import *
from .needs_auth import *
-from .Aichatos import Aichatos
+from .AI365VIP import AI365VIP
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
-from .ChatForAi import ChatForAi
+from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
-from .ChatgptAi import ChatgptAi
from .ChatgptFree import ChatgptFree
-from .ChatgptNext import ChatgptNext
-from .ChatgptX import ChatgptX
-from .Cnote import Cnote
from .Cohere import Cohere
+from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
-from .Feedough import Feedough
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
-from .GigaChat import GigaChat
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
+from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
@@ -45,12 +41,12 @@ from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
+from .Reka import Reka
from .Replicate import Replicate
-from .ReplicateImage import ReplicateImage
+from .ReplicateHome import ReplicateHome
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
-from .Reka import Reka
import sys