author | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-08-30 22:39:18 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-08-30 22:39:18 +0200 |
commit | c702f54e39a39c702cb2a2a8c6782c15422785aa (patch) | |
tree | 8a36ace98ab138e1eff134a5ed8891fd3c817b5b /g4f/Provider | |
parent | . (diff) | |
parent | fix for 500 Internal Server Error #2199 [Request] Blackbox provider now support Gemini and LLaMa 3.1 models #2198 with some stuff from #2196 (diff) | |
Diffstat (limited to 'g4f/Provider')
26 files changed, 411 insertions, 393 deletions
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
index fc6ad237..2dcc8d1c 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/AI365VIP.py
@@ -35,31 +35,35 @@ class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
             "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://chat.ai365vip.com",
-            "priority": "u=1, i",
-            "referer": "https://chat.ai365vip.com/en",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+            "origin": cls.url,
+            "referer": f"{cls.url}/en",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-arch": '"x86"',
+            "sec-ch-ua-bitness": '"64"',
+            "sec-ch-ua-full-version": '"127.0.6533.119"',
+            "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
             "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-model": '""',
             "sec-ch-ua-platform": '"Linux"',
+            "sec-ch-ua-platform-version": '"4.19.276"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
         }
         async with ClientSession(headers=headers) as session:
             data = {
-                "model": {
-                    "id": model,
-                    "name": {
-                        "gpt-3.5-turbo": "GPT-3.5",
-                        "claude-3-haiku-20240307": "claude-3-haiku",
-                        "gpt-4o": "GPT-4O"
-                    }.get(model, model),
-                },
-                "messages": [{"role": "user", "content": format_prompt(messages)}],
-                "prompt": "You are a helpful assistant.",
-            }
+                "model": {
+                    "id": model,
+                    "name": "GPT-3.5",
+                    "maxLength": 3000,
+                    "tokenLimit": 2048
+                },
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "key": "",
+                "prompt": "You are a helpful assistant.",
+                "temperature": 1
+            }
             async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:

diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/AiChatOnline.py
new file mode 100644
index 00000000..152a7d31
--- /dev/null
+++ b/g4f/Provider/AiChatOnline.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+    site_url = "https://aichatonline.org"
+    url = "https://aichatonlineorg.erweima.ai"
+    api_endpoint = "/aichatonline/api/chat/gpt"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
+    supports_message_history = False
+
+    @classmethod
+    async def grab_token(
+        cls,
+        session: ClientSession,
+        proxy: str
+    ):
+        async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+            response.raise_for_status()
+            return (await response.json())['data']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/chatgpt/chat/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "aichatonline.org",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "conversationId": get_random_string(),
+                "prompt": format_prompt(messages),
+            }
+            headers['UniqueId'] = await cls.grab_token(session, proxy)
+            async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    try:
+                        yield json.loads(chunk)['data']['message']
+                    except:
+                        continue
\ No newline at end of file

diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 7e2b2831..4a8d0a55 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -33,8 +33,8 @@ class Aura(AsyncGeneratorProvider):
             new_messages.append(message)
         data = {
             "model": {
-                "id": "openchat_v3.2_mistral",
-                "name": "OpenChat Aura",
+                "id": "openchat_3.6",
+                "name": "OpenChat 3.6 (latest)",
                 "maxLength": 24576,
                 "tokenLimit": max_tokens
             },

diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index a86471f2..fd84875e 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -14,7 +14,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
     working = True
     default_model = 'blackbox'
-
+    models = [
+        default_model,
+        "gemini-1.5-flash",
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+    ]
     @classmethod
     async def create_async_generator(
         cls,
@@ -28,7 +34,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         if image is not None:
             messages[-1]["data"] = {
                 "fileText": image_name,
-                "imageBase64": to_data_uri(image)
+                "imageBase64": to_data_uri(image),
+                "title": str(uuid.uuid4())
             }
 
         headers = {
@@ -48,7 +55,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             random_id = secrets.token_hex(16)
             random_user_id = str(uuid.uuid4())
-
+            model_id_map = {
+                "blackbox": {},
+                "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+                "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+                'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+                'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+            }
             data = {
                 "messages": messages,
                 "id": random_id,
@@ -62,12 +75,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "webSearchMode": False,
                 "userSystemPrompt": "",
                 "githubToken": None,
+                "trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
                 "maxTokens": None
             }
             async with session.post(
                 f"{cls.url}/api/chat", json=data, proxy=proxy
-            ) as response: # type: ClientResponse
+            ) as response:
                 response.raise_for_status()
                 async for chunk in response.content.iter_any():
                     if chunk:

diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index f62ef8af..8c058fdc 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -14,8 +14,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     working = True
     supports_gpt_4 = True
 
-    async def get_nonce():
-        async with ClientSession() as session:
+    async def get_nonce(headers: dict) -> str:
+        async with ClientSession(headers=headers) as session:
             async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
                 return (await response.json())["restNonce"]
 
@@ -42,9 +42,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "x-wp-nonce": await cls.get_nonce(),
         }
-
+        headers['x-wp-nonce'] = await cls.get_nonce(headers)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {

diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index b1e00a22..3cf363a5 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -1,21 +1,26 @@
 from __future__ import annotations
 
 import re
-
+import json
+import asyncio
 from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ..typing import Messages, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
 
-class ChatgptFree(AsyncProvider):
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgptfree.ai"
-    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
     working = True
     _post_id = None
     _nonce = None
+    default_model = 'gpt-4o-mini-2024-07-18'
+    model_aliases = {
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+    }
 
     @classmethod
-    async def create_async(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
@@ -23,7 +28,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
         timeout: int = 120,
         cookies: dict = None,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator[str, None]:
         headers = {
             'authority': 'chatgptfree.ai',
             'accept': '*/*',
@@ -38,7 +43,6 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
             'sec-fetch-site': 'same-origin',
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
         }
-
         async with StreamSession(
                 headers=headers,
                 cookies=cookies,
@@ -49,19 +53,11 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
 
             if not cls._nonce:
                 async with session.get(f"{cls.url}/") as response:
-
                     await raise_for_status(response)
                     response = await response.text()
-
-                    result = re.search(r'data-post-id="([0-9]+)"', response)
-                    if not result:
-                        raise RuntimeError("No post id found")
-                    cls._post_id = result.group(1)
-
                     result = re.search(r'data-nonce="(.*?)"', response)
                     if result:
                         cls._nonce = result.group(1)
-
                     else:
                         raise RuntimeError("No nonce found")
@@ -74,6 +70,30 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
                 "message": prompt,
                 "bot_id": "0"
             }
+
             async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
                 await raise_for_status(response)
-                return (await response.json())["data"]
\ No newline at end of file
+                buffer = ""
+                async for line in response.iter_lines():
+                    line = line.decode('utf-8').strip()
+                    if line.startswith('data: '):
+                        data = line[6:]
+                        if data == '[DONE]':
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            content = json_data['choices'][0]['delta'].get('content', '')
+                            if content:
+                                yield content
+                        except json.JSONDecodeError:
+                            continue
+                    elif line:
+                        buffer += line
+
+                if buffer:
+                    try:
+                        json_response = json.loads(buffer)
+                        if 'data' in json_response:
+                            yield json_response['data']
+                    except json.JSONDecodeError:
+                        print(f"Failed to decode final JSON. Buffer content: {buffer}")
\ No newline at end of file
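ChatgptFree above switches from returning one JSON blob to parsing an OpenAI-style SSE stream, keeping a buffer fallback for servers that still answer in plain JSON. The pattern can be exercised in isolation; a minimal sketch (the sample payloads are illustrative, not captured from the service):

```python
import json

def iter_sse_content(lines):
    """Yield streamed delta text; fall back to a buffered plain-JSON answer."""
    buffer = ""
    for raw in lines:
        line = raw.strip()
        if line.startswith("data: "):
            payload = line[6:]
            if payload == "[DONE]":
                break
            try:
                delta = json.loads(payload)["choices"][0]["delta"]
                if delta.get("content"):
                    yield delta["content"]
            except (json.JSONDecodeError, LookupError):
                continue
        elif line:
            buffer += line  # non-SSE line: keep for the plain-JSON fallback
    if buffer:
        try:
            yield json.loads(buffer)["data"]
        except (json.JSONDecodeError, KeyError):
            pass

stream = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]
assert "".join(iter_sse_content(stream)) == "Hello"
```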
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 6146994b..b8e1732f 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -25,7 +25,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-4o": "gpt-4o-mini",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
     }
 
     # Obfuscated URLs and headers

diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index f3e31962..b12fb254 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -11,11 +11,7 @@ class DeepInfra(Openai):
     needs_auth = True
     supports_stream = True
     supports_message_history = True
-    default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
-    default_vision_model = "llava-hf/llava-1.5-7b-hf"
-    model_aliases = {
-        'dbrx-instruct': 'databricks/dbrx-instruct',
-    }
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
 
     @classmethod
     def get_models(cls):

diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
deleted file mode 100644
index d35e30ee..00000000
--- a/g4f/Provider/Feedough.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-import json
-import asyncio
-from aiohttp import ClientSession, TCPConnector
-from urllib.parse import urlencode
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.feedough.com"
-    api_endpoint = "/wp-admin/admin-ajax.php"
-    working = True
-    default_model = ''
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/ai-prompt-generator/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
-        }
-
-        connector = TCPConnector(ssl=False)
-
-        async with ClientSession(headers=headers, connector=connector) as session:
-            data = {
-                "action": "aixg_generate",
-                "prompt": format_prompt(messages),
-                "aixg_generate_nonce": "110c021031"
-            }
-
-            try:
-                async with session.post(
-                    f"{cls.url}{cls.api_endpoint}",
-                    data=urlencode(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_text = await response.text()
-                    try:
-                        response_json = json.loads(response_text)
-                        if response_json.get("success") and "data" in response_json:
-                            message = response_json["data"].get("message", "")
-                            yield message
-                    except json.JSONDecodeError:
-                        yield response_text
-            except Exception as e:
-                print(f"An error occurred: {e}")
-
-    @classmethod
-    async def run(cls, *args, **kwargs):
-        async for item in cls.create_async_generator(*args, **kwargs):
-            yield item
-
-        tasks = asyncio.all_tasks()
-        for task in tasks:
-            if not task.done():
-                await task

diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 7d8c1d10..6485ced2 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -16,14 +16,32 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
         'gpt-3.5-turbo',
         'SparkDesk-v1.1',
         'deepseek-coder',
+        '@cf/qwen/qwen1.5-14b-chat-awq',
         'deepseek-chat',
         'Qwen2-7B-Instruct',
         'glm4-9B-chat',
         'chatglm3-6B',
         'Yi-1.5-9B-Chat',
     ]
+    model_aliases = {
+        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
+        "sparkdesk-v1.1": "SparkDesk-v1.1",
+        "qwen2-7b": "Qwen2-7B-Instruct",
+        "glm4-9b": "glm4-9B-chat",
+        "chatglm3-6b": "chatglm3-6B",
+        "yi-1.5-9b": "Yi-1.5-9B-Chat",
+    }
 
     @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model.lower() in cls.model_aliases:
+            return cls.model_aliases[model.lower()]
+        else:
+            return cls.default_model
+
+    @classmethod
     async def create_async_generator(
         cls,
         model: str,
@@ -46,6 +64,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
         }
+        model = cls.get_model(model)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
@@ -74,5 +93,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
                         chunk = json.loads(line_str[6:])
                         delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                         accumulated_text += delta_content
+                        yield delta_content
                     except json.JSONDecodeError:
                         pass

diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 7fa3b5ab..82a3824b 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -6,23 +6,25 @@ import random
 from typing import AsyncGenerator, Optional, Dict, Any
 
 from ..typing import Messages
 from ..requests import StreamSession, raise_for_status
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..errors import RateLimitError
 
 # Constants
 DOMAINS = [
     "https://s.aifree.site",
-    "https://v.aifree.site/"
+    "https://v.aifree.site/",
+    "https://al.aifree.site/",
+    "https://u4.aifree.site/"
 ]
 RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
 
-class FreeGpt(AsyncGeneratorProvider):
-    url: str = "https://freegptsnav.aifree.site"
-    working: bool = True
-    supports_message_history: bool = True
-    supports_system_message: bool = True
-    supports_gpt_35_turbo: bool = True
+class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://freegptsnav.aifree.site"
+    working = True
+    supports_message_history = True
+    supports_system_message = True
+    default_model = 'llama-3.1-70b'
 
     @classmethod
     async def create_async_generator(

diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
index 624f33cf..d0543176 100644
--- a/g4f/Provider/FreeNetfly.py
+++ b/g4f/Provider/FreeNetfly.py
@@ -54,8 +54,8 @@ class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
             "top_p": 1
         }
 
-        max_retries = 3
-        retry_delay = 1
+        max_retries = 5
+        retry_delay = 2
 
         for attempt in range(max_retries):
            try:

diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index c130d183..f1010c1c 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -2,16 +2,16 @@ from __future__ import annotations
 
 import json, requests, re
-from curl_cffi import requests as cf_reqs
-from ..typing import CreateResult, Messages
+from curl_cffi import requests as cf_reqs
+from ..typing import CreateResult, Messages
 from .base_provider import ProviderModelMixin, AbstractProvider
-from .helper import format_prompt
+from .helper import format_prompt
 
 class HuggingChat(AbstractProvider, ProviderModelMixin):
-    url = "https://huggingface.co/chat"
-    working = True
+    url = "https://huggingface.co/chat"
+    working = True
     supports_stream = True
-    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
@@ -19,24 +19,41 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         '01-ai/Yi-1.5-34B-Chat',
-        'mistralai/Mistral-7B-Instruct-v0.2',
+        'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
-
+
     model_aliases = {
-        "mistralai/Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.2"
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
+        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
+        "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
     }
 
     @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
-        **kwargs) -> CreateResult:
-
-        if (model in cls.models) :
-
+        **kwargs
+    ) -> CreateResult:
+        model = cls.get_model(model)
+
+        if model in cls.models:
             session = cf_reqs.Session()
             session.headers = {
                 'accept': '*/*',
@@ -54,29 +71,24 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'sec-fetch-site': 'same-origin',
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
-
-            print(model)
             json_data = {
                 'model': model,
             }
-
             response = session.post('https://huggingface.co/chat/conversation', json=json_data)
             conversationId = response.json()['conversationId']
-
             response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
-
             data: list = (response.json())["nodes"][1]["data"]
             keys: list[int] = data[data[0]["messages"]]
             message_keys: dict = data[keys[0]]
             messageId: str = data[message_keys["id"]]
 
             settings = {
-                "inputs":format_prompt(messages),
-                "id":messageId,
-                "is_retry":False,
-                "is_continue":False,
-                "web_search":False,
-                "tools":[]
+                "inputs": format_prompt(messages),
+                "id": messageId,
+                "is_retry": False,
+                "is_continue": False,
+                "web_search": False,
+                "tools": []
             }
 
             headers = {
@@ -96,9 +108,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
-
             files = {
-                'data': (None, json.dumps(settings, separators=(',', ':'))),
+                'data': (None, json.dumps(settings, separators=(',', ':'))),
             }
 
             response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
                 cookies=session.cookies,
                 headers=headers,
                 files=files,
             )
-
             first_token = True
             for line in response.iter_lines():
                 line = json.loads(line)
@@ -119,11 +129,10 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                     if first_token:
                         token = token.lstrip().replace('\u0000', '')
                         first_token = False
-
                     else:
                         token = token.replace('\u0000', '')
 
-                    yield (token)
-
+                    yield token
+
                 elif line["type"] == "finalAnswer":
                     break
\ No newline at end of file
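Both HuggingChat here and HuggingFace below gain the same `get_model` resolver: exact ids pass through, short aliases map to repository ids, and anything unknown silently falls back to `default_model`. One subtlety worth flagging: the new alias dict lists the `"mixtral-8x7b"` key twice, and in a Python dict literal the last duplicate wins, so that alias actually resolves to the Nous-Hermes DPO model:

```python
# Duplicate keys in a dict literal are collapsed; only the last value survives.
model_aliases = {
    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
}
assert len(model_aliases) == 1
assert model_aliases["mixtral-8x7b"] == "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
```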
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 6634aa75..a3741196 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -1,20 +1,17 @@
 from __future__ import annotations
-
 import json
 from aiohttp import ClientSession, BaseConnector
-
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..errors import RateLimitError, ModelNotFoundError
 from ..requests.raise_for_status import raise_for_status
-
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
     needs_auth = True
     supports_message_history = True
-    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
@@ -22,10 +19,30 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         '01-ai/Yi-1.5-34B-Chat',
-        'mistralai/Mistral-7B-Instruct-v0.2',
+        'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
 
+    model_aliases = {
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
+        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
+        "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -40,10 +57,26 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         temperature: float = 0.7,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model) if not model else model
-        headers = {}
+        model = cls.get_model(model)
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en',
+            'cache-control': 'no-cache',
+            'origin': 'https://huggingface.co',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://huggingface.co/chat/',
+            'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+        }
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
+
         params = {
             "return_full_text": False,
             "max_new_tokens": max_new_tokens,
@@ -51,6 +84,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
             **kwargs
         }
         payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
+
         async with ClientSession(
             headers=headers,
             connector=get_connector(connector, proxy)
@@ -72,7 +106,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                         yield chunk
                 else:
                     yield (await response.json())[0]["generated_text"].strip()
-
 def format_prompt(messages: Messages) -> str:
     system_messages = [message["content"] for message in messages if message["role"] == "system"]
     question = " ".join([messages[-1]["content"], *system_messages])
@@ -81,4 +114,4 @@ def format_prompt(messages: Messages) -> str:
         for idx, message in enumerate(messages)
         if message["role"] == "assistant"
     ])
-    return f"{history}<s>[INST] {question} [/INST]"
+    return f"{history}<s>[INST] {question} [/INST]"
\ No newline at end of file
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index c708bcb9..6b2d02c3 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -4,7 +4,7 @@ import json
 from typing import AsyncGenerator, Optional, List, Dict, Union, Any
 from aiohttp import ClientSession, BaseConnector, ClientResponse
 
-from ..typing import AsyncResult, Messages
+from ..typing import Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import get_random_string, get_connector
 from ..requests import raise_for_status

diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index af90860d..2034c34a 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import uuid
-
+import requests
 from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
@@ -9,74 +9,6 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..requests import raise_for_status
 
-models = {
-    "gpt-4o-mini-free": {
-        "id": "gpt-4o-mini-free",
-        "name": "GPT-4o-Mini-Free",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 31200,
-        "tokenLimit": 7800,
-        "context": "8K",
-    },
-    "gpt-4o-mini": {
-        "id": "gpt-4o-mini",
-        "name": "GPT-4o-Mini",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4o-free": {
-        "context": "8K",
-        "id": "gpt-4o-free",
-        "maxLength": 31200,
-        "model": "ChatGPT",
-        "name": "GPT-4o-free",
-        "provider": "OpenAI",
-        "tokenLimit": 7800,
-    },
-    "gpt-4-turbo-2024-04-09": {
-        "id": "gpt-4-turbo-2024-04-09",
-        "name": "GPT-4-Turbo",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4o": {
-        "context": "128K",
-        "id": "gpt-4o",
-        "maxLength": 124000,
-        "model": "ChatGPT",
-        "name": "GPT-4o",
-        "provider": "OpenAI",
-        "tokenLimit": 62000,
-    },
-    "gpt-4-0613": {
-        "id": "gpt-4-0613",
-        "name": "GPT-4",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4-turbo": {
-        "id": "gpt-4-turbo",
-        "name": "GPT-4-Turbo",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-}
-
-
-
 class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
     working = True
@@ -85,26 +17,67 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = "gpt-4o"
-    models = list(models.keys())
+    models = None
     model_aliases = {
         "gpt-4o-mini": "gpt-4o-mini-free",
         "gpt-4o": "gpt-4o-free",
         "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
-        "gpt-4-": "gpt-4-0613",
+        "gpt-4o": "gpt-4o-2024-08-06",
+        "gpt-4": "gpt-4-0613",
+
+        "claude-3-opus": "claude-3-opus-20240229",
         "claude-3-opus": "claude-3-opus-20240229-aws",
         "claude-3-opus": "claude-3-opus-20240229-gcp",
         "claude-3-sonnet": "claude-3-sonnet-20240229",
         "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
         "claude-3-haiku": "claude-3-haiku-20240307",
-        "gemini-pro": "gemini-1.5-pro-latest",
+        "claude-2.1": "claude-2.1",
+
+        "gemini-pro": "gemini-1.0-pro-latest",
         "gemini-flash": "gemini-1.5-flash-latest",
+        "gemini-pro": "gemini-1.5-pro-latest",
     }
     _auth_code = ""
     _cookie_jar = None
 
     @classmethod
+    def get_models(cls):
+        if cls.models is None:
+            url = 'https://liaobots.work/api/models'
+            headers = {
+                'accept': '*/*',
+                'accept-language': 'en-US,en;q=0.9',
+                'content-type': 'application/json',
+                'cookie': 'gkp2=ehnhUPJtkCgMmod8Sbxn',
+                'origin': 'https://liaobots.work',
+                'priority': 'u=1, i',
+                'referer': 'https://liaobots.work/',
+                'sec-ch-ua': '"Chromium";v="127", "Not)A;Brand";v="99"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Linux"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+            }
+            data = {'key': ''}
+
+            response = requests.post(url, headers=headers, json=data)
+
+            if response.status_code == 200:
+                try:
+                    models_data = response.json()
+                    cls.models = {model['id']: model for model in models_data}
+                except (ValueError, KeyError) as e:
+                    print(f"Error processing JSON response: {e}")
+                    cls.models = {}
+            else:
+                print(f"Request failed with status code: {response.status_code}")
+                cls.models = {}
+
+        return cls.models
+
+    @classmethod
     async def create_async_generator(
         cls,
         model: str,
@@ -126,9 +99,10 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             cookie_jar=cls._cookie_jar,
             connector=get_connector(connector, proxy, True)
         ) as session:
+            models = cls.get_models()
             data = {
                 "conversationId": str(uuid.uuid4()),
-                "model": models[model],
+                "model": models[cls.get_model(model)],
                 "messages": messages,
                 "key": "",
                 "prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -141,20 +115,11 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             ) as response:
                 await raise_for_status(response)
             try:
-                async with session.post(
-                    "https://liaobots.work/api/user",
-                    json={"authcode": cls._auth_code},
-                    verify_ssl=False
-                ) as response:
-                    await raise_for_status(response)
-                    cls._auth_code = (await response.json(content_type=None))["authCode"]
-                    if not cls._auth_code:
-                        raise RuntimeError("Empty auth code")
-                    cls._cookie_jar = session.cookie_jar
+                await cls.ensure_auth_code(session)
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
-                    headers={"x-auth-code": cls._auth_code},
+                    headers={"x-auth-code": cls._auth_code},
                     verify_ssl=False
                 ) as response:
                     await raise_for_status(response)
@@ -164,16 +129,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                         if chunk:
                             yield chunk.decode(errors="ignore")
             except:
-                async with session.post(
-                    "https://liaobots.work/api/user",
-                    json={"authcode": "pTIQr4FTnVRfr"},
-                    verify_ssl=False
-                ) as response:
-                    await raise_for_status(response)
-                    cls._auth_code = (await response.json(content_type=None))["authCode"]
-                    if not cls._auth_code:
-                        raise RuntimeError("Empty auth code")
-                    cls._cookie_jar = session.cookie_jar
+                await cls.initialize_auth_code(session)
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
@@ -186,7 +142,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                             raise RuntimeError("Invalid session")
                         if chunk:
                             yield chunk.decode(errors="ignore")
-
     @classmethod
     def get_model(cls, model: str) -> str:
         """
@@ -194,15 +149,16 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if model in cls.model_aliases:
             model = cls.model_aliases[model]
+        models = cls.get_models()
         if model not in models:
             raise ValueError(f"Model '{model}' is not supported.")
         return model
-
     @classmethod
     def is_supported(cls, model: str) -> bool:
         """
         Check if the given model is supported.
         """
+        models = cls.get_models()
         return model in models or model in cls.model_aliases
 
     @classmethod
@@ -220,7 +176,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         if not cls._auth_code:
             raise RuntimeError("Empty auth code")
         cls._cookie_jar = session.cookie_jar
-
     @classmethod
     async def ensure_auth_code(cls, session: ClientSession) -> None:
         """
@@ -228,3 +183,18 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not cls._auth_code:
             await cls.initialize_auth_code(session)
+
+    @classmethod
+    async def refresh_auth_code(cls, session: ClientSession) -> None:
+        """
+        Refresh the auth code by making a new request.
+        """
+        await cls.initialize_auth_code(session)
+
+    @classmethod
+    async def get_auth_code(cls, session: ClientSession) -> str:
+        """
+        Get the current auth code, initializing it if necessary.
+        """
+        await cls.ensure_auth_code(session)
+        return cls._auth_code
\ No newline at end of file
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
index 6aa407ca..ad04aceb 100644
--- a/g4f/Provider/LiteIcoding.py
+++ b/g4f/Provider/LiteIcoding.py
@@ -31,7 +31,7 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "Accept": "*/*",
             "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer null",
+            "Authorization": "Bearer b3b2712cf83640a5acfdc01e78369930",
             "Connection": "keep-alive",
             "Content-Type": "application/json;charset=utf-8",
             "DNT": "1",
@@ -74,6 +74,9 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                     response.raise_for_status()
                     buffer = ""
                     full_response = ""
+                    def decode_content(data):
+                        bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
+                        return bytes_array.decode('utf-8')
                     async for chunk in response.content.iter_any():
                         if chunk:
                             buffer += chunk.decode()
@@ -83,9 +86,17 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                                     content = part[6:].strip()
                                     if content and content != "[DONE]":
                                         content = content.strip('"')
-                                        full_response += content
-
-                full_response = full_response.replace('" "', ' ')
+                                        # Decoding each content block
+                                        decoded_content = decode_content(content)
+                                        full_response += decoded_content
+                full_response = (
+                    full_response.replace('""', '')  # Handle double quotes
+                    .replace('" "', ' ')  # Handle space within quotes
+                    .replace("\\n\\n", "\n\n")
+                    .replace("\\n", "\n")
+                    .replace('\\"', '"')
+                    .strip()
+                )
                 yield full_response.strip()
 
             except ClientResponseError as e:

diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
index 8b7473d8..39f062f0 100644
--- a/g4f/Provider/MagickPenAsk.py
+++ b/g4f/Provider/MagickPenAsk.py
@@ -8,11 +8,11 @@ from .helper import format_prompt
 
 class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://api.magickpen.com"
-    api_endpoint = "/ask"
+    url = "https://magickpen.com/ask"
+    api_endpoint = "https://api.magickpen.com/ask"
     working = True
     supports_gpt_4 = True
-    default_model = "gpt-4o"
+    default_model = "gpt-4o-mini"
 
     @classmethod
     async def create_async_generator(
@@ -37,14 +37,14 @@ class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            'X-API-Secret': 'WCASR6ZQJYM85DVDX7'
+            'X-API-Secret': 'W252GY255JVYBS9NAM'
         }
         async with ClientSession(headers=headers) as session:
             data = {
                 "query": format_prompt(messages),
                 "plan": "Pay as you go"
             }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
                     if chunk:

diff --git a/g4f/Provider/MagickPenChat.py b/g4f/Provider/MagickPenChat.py
index 6c30028a..ade85c4b 100644
--- a/g4f/Provider/MagickPenChat.py
+++ b/g4f/Provider/MagickPenChat.py
@@ -8,12 +8,11 @@ from .helper import format_prompt
 
 class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://api.magickpen.com"
-    api_endpoint = "/chat/free"
+    url = "https://magickpen.com/chat"
+    api_endpoint = "https://api.magickpen.com/chat/free"
     working = True
     supports_gpt_4 = True
     default_model = "gpt-4o-mini"
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -37,13 +36,14 @@ class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            'X-Api-Secret': 'W252GY255JVYBS9NAM'
         }
         async with ClientSession(headers=headers) as session:
             data = {
                 "history": [{"role": "user", "content": format_prompt(messages)}]
             }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
                     if chunk:

diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 7f4587e1..3656a39b 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "mixtral-8x7b-instruct"
+    default_model = "llama-3.1-8b-instruct"
     models = [
         "llama-3.1-sonar-large-128k-online",
         "llama-3.1-sonar-small-128k-online",
@@ -21,10 +21,6 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-sonar-small-128k-chat",
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
-        "gemma-2-9b-it",
-        "gemma-2-27b-it",
-        "nemotron-4-340b-instruct",
-        "mixtral-8x7b-instruct"
     ]
 
     @classmethod

diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 47e74ee3..860aef80 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -1,15 +1,19 @@
+from __future__ import annotations
+
 import json
 from aiohttp import ClientSession
 
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
 
-class Pizzagpt(AsyncGeneratorProvider):
+class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.pizzagpt.it"
     api_endpoint = "/api/chatx-completion"
-    supports_message_history = False
-    supports_gpt_35_turbo = True
     working = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
 
     @classmethod
     async def create_async_generator(
@@ -19,30 +23,28 @@ class Pizzagpt(AsyncGeneratorProvider):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        payload = {
-            "question": messages[-1]["content"]
-        }
         headers = {
-            "Accept": "application/json",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/en",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "X-Secret": "Marinara"
+            "accept": "application/json",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "referer": f"{cls.url}/en",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            "x-secret": "Marinara"
         }
-
-        async with ClientSession() as session:
-            async with session.post(
-                f"{cls.url}{cls.api_endpoint}",
-                json=payload,
-                proxy=proxy,
-                headers=headers
-            ) as response:
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "question": prompt
+            }
+            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 response_json = await response.json()
-                yield response_json["answer"]["content"]
+                content = response_json.get("answer", {}).get("content", "")
+                yield content
\ No newline at end of file
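The new `decode_content` helper in LiteIcoding above explains why the old pass-through produced garbage: the stream now sends each byte as a whitespace-separated hex token XORed with 255. Round-tripping the scheme (the example string is chosen here, not captured from the service):

```python
def decode_content(data: str) -> str:
    # Each whitespace-separated token is one byte, hex-encoded and XORed with 255.
    return bytes(int(b, 16) ^ 255 for b in data.split()).decode("utf-8")

# Encode "Hello" the same way the server would, then decode it back.
encoded = " ".join(f"{b ^ 255:02x}" for b in "Hello".encode("utf-8"))
assert encoded == "b7 9a 93 93 90"
assert decode_content(encoded) == "Hello"
```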
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
index 8465a6c0..f44e0060 100644
--- a/g4f/Provider/Rocks.py
+++ b/g4f/Provider/Rocks.py
@@ -1,14 +1,17 @@
+import asyncio
 import json
 from aiohttp import ClientSession
-
 from ..typing import Messages, AsyncResult
 from .base_provider import AsyncGeneratorProvider
 
 class Rocks(AsyncGeneratorProvider):
-    url = "https://api.discord.rocks"
+    url = "https://api.airforce"
     api_endpoint = "/chat/completions"
-    supports_message_history = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
     working = True
 
     @classmethod
@@ -25,12 +28,13 @@ class Rocks(AsyncGeneratorProvider):
             "Accept": "application/json",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Accept-Language": "en-US,en;q=0.9",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/en",
+            "Authorization": "Bearer missing api key",
+            "Origin": "https://llmplayground.net",
+            "Referer": "https://llmplayground.net/",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
         }
 
         async with ClientSession() as session:
@@ -41,16 +45,26 @@ class Rocks(AsyncGeneratorProvider):
                 headers=headers
             ) as response:
                 response.raise_for_status()
+                last_chunk_time = asyncio.get_event_loop().time()
+
                 async for line in response.content:
-                    if line.startswith(b"data: "):
+                    current_time = asyncio.get_event_loop().time()
+                    if current_time - last_chunk_time > 5:
+                        return
+
+                    if line.startswith(b"\n"):
+                        pass
+                    elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
+                        pass # trolled
+                    elif line.startswith(b"data: "):
                         try:
                             line = json.loads(line[6:])
-                        except:
+                        except json.JSONDecodeError:
                             continue
                         chunk = line["choices"][0]["delta"].get("content")
                         if chunk:
                             yield chunk
-                    elif line.startswith(b"\n"):
-                        pass
+                        last_chunk_time = current_time
                     else:
                         raise Exception(f"Unexpected line: {line}")
+                return
\ No newline at end of file
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index cdf5f430..b85854b3 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -19,7 +19,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
-    default_model = "gpt-3.5-turbo"
+    default_model = "gpt-4o-mini"
     default_vision_model = "agent"
     image_models = ["dall-e"]
     models = [

diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index fa1dcfe5..34b4ef2d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -12,6 +12,7 @@ from .needs_auth import *
 
 from .AI365VIP import AI365VIP
 from .Allyfy import Allyfy
+from .AiChatOnline import AiChatOnline
 from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages

diff --git a/g4f/Provider/deprecated/AiChatOnline.py b/g4f/Provider/deprecated/AiChatOnline.py
deleted file mode 100644
index e690f28e..00000000
--- a/g4f/Provider/deprecated/AiChatOnline.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_random_string
-
-class AiChatOnline(AsyncGeneratorProvider):
-    url = "https://aichatonline.org"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = False
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
-            "Accept": "text/event-stream",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/chatgpt/chat/",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Alt-Used": "aichatonline.org",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "TE": "trailers"
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "botId": "default",
-                "customId": None,
-                "session": get_random_string(16),
-                "chatId": get_random_string(),
-                "contextId": 7,
-                "messages": messages,
-                "newMessage": messages[-1]["content"],
-                "newImageId": None,
-                "stream": True
-            }
-            async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk.startswith(b"data: "):
-                        data = json.loads(chunk[6:])
-                        if data["type"] == "live":
-                            yield data["data"]
-                        elif data["type"] == "end":
-                            break
\ No newline at end of file

diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 408f3913..bf923f2a 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -25,7 +25,7 @@ from .Aichat import Aichat
 from .Berlin import Berlin
 from .Phind import Phind
 from .AiAsk import AiAsk
-from .AiChatOnline import AiChatOnline
+from ..AiChatOnline import AiChatOnline
 from .ChatAnywhere import ChatAnywhere
 from .FakeGpt import FakeGpt
 from .GeekGpt import GeekGpt
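Since the rewritten AiChatOnline is now exported from `g4f.Provider` (see the `__init__.py` hunk above) while the old copy is re-exported from `deprecated`, the new async-generator interface can be driven directly. An untested sketch against the signatures shown in this diff:

```python
import asyncio
from g4f.Provider import AiChatOnline

async def main():
    messages = [{"role": "user", "content": "Say hello"}]
    # create_async_generator is an async generator classmethod; iterate the chunks.
    async for chunk in AiChatOnline.create_async_generator("gpt-4o-mini", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```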