author     kqlio67 <166700875+kqlio67@users.noreply.github.com>  2024-07-25 08:21:55 +0200
committer  kqlio67 <166700875+kqlio67@users.noreply.github.com>  2024-07-25 08:21:55 +0200
commit     29c13e26cd794208786a8a6cf421c264015c7e3a (patch)
tree       da3a3ebdc903ac22c451d1ffd749eebd7c0aeec8 /g4f/Provider
parent     Merge pull request #2106 from zachey01/main (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Allyfy.py                  71
-rw-r--r--  g4f/Provider/ChatGot.py                 75
-rw-r--r--  g4f/Provider/Chatgpt4Online.py         101
-rw-r--r--  g4f/Provider/GeminiProChat.py            4
-rw-r--r--  g4f/Provider/HuggingChat.py              3
-rw-r--r--  g4f/Provider/HuggingFace.py              5
-rw-r--r--  g4f/Provider/Liaobots.py                45
-rw-r--r--  g4f/Provider/PerplexityLabs.py          15
-rw-r--r--  g4f/Provider/Pi.py                       3
-rw-r--r--  g4f/Provider/ReplicateHome.py           48
-rw-r--r--  g4f/Provider/You.py                     20
-rw-r--r--  g4f/Provider/__init__.py                 2
-rw-r--r--  g4f/Provider/needs_auth/Openai.py        3
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py    2
14 files changed, 285 insertions, 112 deletions
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
new file mode 100644
index 00000000..8733b1ec
--- /dev/null
+++ b/g4f/Provider/Allyfy.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider):
+ url = "https://chatbot.allyfy.chat"
+ api_endpoint = "/api/v1/message/stream/super/chat"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json;charset=utf-8",
+ "dnt": "1",
+ "origin": "https://www.allyfy.chat",
+ "priority": "u=1, i",
+ "referer": "https://www.allyfy.chat/",
+ "referrer": "https://www.allyfy.chat",
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [{"content": prompt, "role": "user"}],
+ "content": prompt,
+ "baseInfo": {
+ "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 180,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = []
+ async for line in response.content:
+ line = line.decode().strip()
+ if line.startswith("data:"):
+ data_content = line[5:]
+ if data_content == "[DONE]":
+ break
+ try:
+ json_data = json.loads(data_content)
+ if "content" in json_data:
+ full_response.append(json_data["content"])
+ except json.JSONDecodeError:
+ continue
+ yield "".join(full_response)
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
new file mode 100644
index 00000000..55e8d0b6
--- /dev/null
+++ b/g4f/Provider/ChatGot.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.chatgot.one/"
+ working = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(time: int, text: str, secret: str = ""):
+ message = f"{time}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
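
The sign field sent to /api/generate is just a hex SHA-256 digest over a colon-joined "time:text:secret" string, with the millisecond timestamp reused from the request body. A standalone sketch of the same computation:

import time
from hashlib import sha256

def generate_signature(timestamp: int, text: str, secret: str = "") -> str:
    # Mirrors the helper above: sha256("{time}:{text}:{secret}") as hex.
    return sha256(f"{timestamp}:{text}:{secret}".encode()).hexdigest()

ts = int(time.time() * 1e3)  # milliseconds, matching create_async_generator
print(ts, generate_signature(ts, "Hello"))
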
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index ff9a2c8f..d55be65b 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -1,22 +1,18 @@
from __future__ import annotations
-import re
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt
+
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
- supports_message_history = True
- supports_gpt_35_turbo = True
- working = True
- _wpnonce = None
- _context_id = None
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = True
+ supports_gpt_4 = True
@classmethod
async def create_async_generator(
@@ -24,49 +20,52 @@ class Chatgpt4Online(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
- webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
- args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
- async with ClientSession(**args) as session:
- if not cls._wpnonce:
- async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
- result = re.search(r'restNonce&quot;:&quot;(.*?)&quot;', response)
- if result:
- cls._wpnonce = result.group(1)
- else:
- raise RuntimeError("No nonce found")
- result = re.search(r'contextId&quot;:(.*?),', response)
- if result:
- cls._context_id = result.group(1)
- else:
- raise RuntimeError("No contextId found")
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "x-wp-nonce": "d9505e9877",
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "botId":"default",
- "customId":None,
- "session":"N/A",
- "chatId":get_random_string(11),
- "contextId":cls._context_id,
- "messages":messages[:-1],
- "newMessage":messages[-1]["content"],
- "newImageId":None,
- "stream":True
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
}
- async with session.post(
- f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
- json=data,
- proxy=proxy,
- headers={"x-wp-nonce": cls._wpnonce}
- ) as response:
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
- async for line in response.content:
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- if "type" not in line:
- raise RuntimeError(f"Response: {line}")
- elif line["type"] == "live":
- yield line["data"]
- elif line["type"] == "end":
- break
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
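
The rewritten transport is a plain SSE stream: each "data: " line carries a JSON event whose type is either "live" (an incremental fragment) or "end" (a final payload whose data field is itself JSON containing the full reply). A standalone parser sketch over an already-buffered body, with the event shapes taken from the handler above (note that iter_any() can split a line across chunks, which the handler tolerates by skipping undecodable fragments):

import json

def parse_mwai_stream(body: str) -> str:
    full_response = ""
    for line in body.splitlines():
        if not line.startswith("data: "):
            continue
        event = json.loads(line[6:])
        if event["type"] == "live":
            full_response += event["data"]  # incremental fragment
        elif event["type"] == "end":
            # The "end" event carries the authoritative full reply.
            return json.loads(event["data"])["reply"]
    return full_response

body = 'data: {"type": "live", "data": "Hel"}\ndata: {"type": "end", "data": "{\\"reply\\": \\"Hello\\"}"}'
print(parse_mwai_stream(body))  # Hello
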
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index c61e2ff3..208ca773 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -13,10 +13,10 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.chatgot.one/"
+ url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
- default_model = ''
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index d480d13c..f7c6b581 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,8 +13,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a5e27ccf..6634aa75 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -14,16 +14,17 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
needs_auth = True
supports_message_history = True
+ default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
]
- default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 277d8ea2..0cb5edff 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,14 +10,23 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
"gpt-4o-free": {
"context": "8K",
@@ -91,6 +100,15 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
@@ -155,10 +173,21 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
+ default_model = "gpt-4o"
models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2.0"
+ "gpt-4o-mini": "gpt-4o-mini-free",
+ "gpt-4o": "gpt-4o-free",
+ "claude-3-opus": "claude-3-opus-20240229",
+ "claude-3-opus": "claude-3-opus-20240229-aws",
+ "claude-3-opus": "claude-3-opus-20240229-gcp",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gemini-pro": "gemini-1.5-pro-latest",
+ "gemini-pro": "gemini-1.0-pro-latest",
+ "gemini-flash": "gemini-1.5-flash-latest",
}
_auth_code = ""
_cookie_jar = None
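
Note that the new model_aliases literal repeats the keys "claude-3-opus" (three times) and "gemini-pro" (twice). A Python dict literal keeps only the last assignment per key, so only the -gcp and 1.0-pro targets actually survive:

aliases = {
    "claude-3-opus": "claude-3-opus-20240229",
    "claude-3-opus": "claude-3-opus-20240229-aws",
    "claude-3-opus": "claude-3-opus-20240229-gcp",
}
print(aliases)  # {'claude-3-opus': 'claude-3-opus-20240229-gcp'} -- earlier values are silently dropped
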
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 4a2cc9e5..0a298e55 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -15,21 +15,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "mixtral-8x7b-instruct"
models = [
- "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
- "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
- "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
- "related"
+ "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
]
- model_aliases = {
- "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
- "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
- "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
- "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
- "databricks/dbrx-instruct": "dbrx-instruct",
- "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
- "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
- }
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 5a1e9f0e..e03830f4 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
working = True
supports_stream = True
_session = None
+ default_model = "pi"
@classmethod
def create_completion(
@@ -65,4 +66,4 @@ class Pi(AbstractProvider):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
yield json.loads(line.split(b'data: ')[1])
-
\ No newline at end of file
+
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 48336831..e6c8d2d3 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -14,40 +14,46 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
- default_model = 'stability-ai/sdxl'
+ default_model = 'stability-ai/stable-diffusion-3'
models = [
- # image
- 'stability-ai/sdxl',
- 'ai-forever/kandinsky-2.2',
+ # Models for image generation
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
- # text
- 'meta/llama-2-70b-chat',
- 'mistralai/mistral-7b-instruct-v0.2'
+ # Models for text generation
+ 'meta/meta-llama-3-70b-instruct',
+ 'mistralai/mixtral-8x7b-instruct-v0.1',
+ 'google-deepmind/gemma-2b-it',
]
versions = {
- # image
- 'stability-ai/sdxl': [
- "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
- "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
- "7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
+ # Model versions for generating images
+ 'stability-ai/stable-diffusion-3': [
+ "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
],
- 'ai-forever/kandinsky-2.2': [
- "ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
+ 'bytedance/sdxl-lightning-4step': [
+ "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
+ ],
+ 'playgroundai/playground-v2.5-1024px-aesthetic': [
+ "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
],
-
- # Text
- 'meta/llama-2-70b-chat': [
- "dp-542693885b1777c98ef8c5a98f2005e7"
+
+ # Model versions for text generation
+ 'meta/meta-llama-3-70b-instruct': [
+ "dp-cf04fe09351e25db628e8b6181276547"
],
- 'mistralai/mistral-7b-instruct-v0.2': [
+ 'mistralai/mixtral-8x7b-instruct-v0.1': [
"dp-89e00f489d498885048e94f9809fbc76"
+ ],
+ 'google-deepmind/gemma-2b-it': [
+ "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
]
}
- image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
- text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
+ image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
+ text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(
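
Each versions entry pins a model name to an immutable Replicate version hash, and the image_models/text_models sets drive the output handling. A small lookup sketch using only the class attributes defined above:

from g4f.Provider import ReplicateHome

model = "stability-ai/stable-diffusion-3"
version = ReplicateHome.versions[model][0]  # first (here, only) pinned hash
kind = "image" if model in ReplicateHome.image_models else "text"
print(model, kind, version)
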
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 162d6adb..cdf5f430 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
+ "gpt-4o-mini",
"gpt-4o",
- "gpt-4",
"gpt-4-turbo",
- "claude-instant",
- "claude-2",
+ "gpt-4",
+ "claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
- "gemini-pro",
+ "claude-2",
+ "llama-3.1-70b",
+ "llama-3",
+ "gemini-1-5-flash",
"gemini-1-5-pro",
+ "gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
- "llama3",
- "zephyr",
+ "dolphin-2.5",
default_vision_model,
*image_models
]
- model_aliases = {
- "claude-v2": "claude-2",
- }
_cookies = None
_cookies_used = 0
_telemetry_ids = []
@@ -220,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
- }
\ No newline at end of file
+ }
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 56c01150..0bcab3f2 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,10 +11,12 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
+from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
+from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
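
With these two imports added, the new providers resolve from the package root like any other. A quick smoke check against the attributes defined in their modules:

from g4f.Provider import Allyfy, ChatGot

print(Allyfy.url, Allyfy.working)          # https://chatbot.allyfy.chat True
print(ChatGot.url, ChatGot.default_model)  # https://www.chatgot.one/ gemini-pro
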
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index 9da6bad8..a0740c47 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_message_history = True
supports_system_message = True
+ default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
- }
\ No newline at end of file
+ }
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9321c24a..e581cf55 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -61,7 +61,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
+ models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "gpt-4o-mini", "auto"]
model_aliases = {
"text-davinci-002-render-sha": "gpt-3.5-turbo",
"": "gpt-3.5-turbo",