From 202b0730ffb0a47360b901f0dd007b13a2f59bb2 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 13:58:09 +0300 Subject: Updated(docs/client.md) --- docs/client.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/client.md b/docs/client.md index 5e6b79ba..e95c510d 100644 --- a/docs/client.md +++ b/docs/client.md @@ -61,8 +61,8 @@ You can use the `ChatCompletions` endpoint to generate text completions as follo ```python from g4f.client import Client -client = Client() +client = Client() response = client.chat.completions.create( model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}], @@ -77,7 +77,6 @@ Streaming is also supported: from g4f.client import Client client = Client() - stream = client.chat.completions.create( model="gpt-4", messages=[{"role": "user", "content": "Say this is a test"}], -- cgit v1.2.3 From c5b3de1c3145277eb577549d27d04a572b2eeb97 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 13:59:47 +0300 Subject: feat(README.md): update documentation and add new features --- README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 1e380675..eee01183 100644 --- a/README.md +++ b/README.md @@ -170,8 +170,8 @@ from g4f.client import Client client = Client() response = client.chat.completions.create( model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Hello"}], - ... + messages=[{"role": "user", "content": "Say this is a test"}], + # Add any other necessary parameters ) print(response.choices[0].message.content) ``` @@ -187,11 +187,13 @@ from g4f.client import Client client = Client() response = client.images.generate( - model="gemini", - prompt="a white siamese cat", - ... + model="dall-e-3", + prompt="a white siamese cat", + # Add any other necessary parameters ) + image_url = response.data[0].url +print(f"Generated image URL: {image_url}") ``` [![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md) -- cgit v1.2.3 From c6d11e5cef86ac0bcce1e83435f1206c5acff433 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 15:07:06 +0300 Subject: refactor(g4f/api/__init__.py): use asynchronous methods in Client --- g4f/api/__init__.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 2c723978..da35319a 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -17,7 +17,7 @@ from typing import Union, Optional import g4f import g4f.debug -from g4f.client import AsyncClient +from g4f.client import Client from g4f.typing import Messages from g4f.cookies import read_cookie_files @@ -69,7 +69,7 @@ class AppConfig(): class Api: def __init__(self, app: FastAPI) -> None: self.app = app - self.client = AsyncClient() + self.client = Client() self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key") def register_authorization(self): @@ -156,7 +156,8 @@ class Api: auth_header = auth_header.split(None, 1)[-1] if auth_header and auth_header != "Bearer": config.api_key = auth_header - response = self.client.chat.completions.create( + # Use the asynchronous create method and await it + response = await self.client.chat.completions.async_create( **{ **AppConfig.defaults, **config.dict(exclude_none=True), @@ -164,7 +165,7 @@ class Api: ignored=AppConfig.ignored_providers ) if not config.stream: - return JSONResponse((await response).to_json()) + return JSONResponse(response.to_json()) async def streaming(): try: @@ -196,10 +197,11 
@@ class Api: auth_header = auth_header.split(None, 1)[-1] if auth_header and auth_header != "Bearer": config.api_key = auth_header - response = self.client.images.generate( + # Use the asynchronous generate method and await it + response = await self.client.images.async_generate( **config.dict(exclude_none=True), ) - return JSONResponse((await response).to_json()) + return JSONResponse(response.to_json()) except Exception as e: logging.exception(e) return Response(content=format_exception(e, config), status_code=500, media_type="application/json") @@ -232,4 +234,4 @@ def run_api( use_colors=use_colors, factory=True, reload=debug - ) \ No newline at end of file + ) -- cgit v1.2.3 From 2bcb45bfbdd4abfda2965025b2419c47e78b9dc0 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 15:08:16 +0300 Subject: docs/interference.md --- docs/interference.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/interference.md b/docs/interference.md index b140f66a..1b4f0c11 100644 --- a/docs/interference.md +++ b/docs/interference.md @@ -54,7 +54,7 @@ Send the POST request to /v1/chat/completions with body containing the `model` m import requests url = "http://localhost:1337/v1/chat/completions" body = { - "model": "gpt-3.5-turbo-16k", + "model": "gpt-3.5-turbo", "stream": False, "messages": [ {"role": "assistant", "content": "What can you do?"} @@ -66,4 +66,4 @@ for choice in json_response: print(choice.get('message', {}).get('content', '')) ``` -[Return to Home](/) \ No newline at end of file +[Return to Home](/) -- cgit v1.2.3 From 0a73aca3e30584023864beb5a31f8f6402f12218 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 15:19:43 +0300 Subject: Updated(g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 250ffe48..a550c3b6 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -23,18 +23,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): models = [ default_model, 'blackboxai-pro', - "llama-3.1-8b", 'llama-3.1-70b', 'llama-3.1-405b', - 'gpt-4o', - 'gemini-pro', 'gemini-1.5-flash', - 'claude-sonnet-3.5', - 'PythonAgent', 'JavaAgent', 'JavaScriptAgent', @@ -87,7 +82,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'gpt-4o': '@GPT-4o', 'gemini-pro': '@Gemini-PRO', 'claude-sonnet-3.5': '@Claude-Sonnet-3.5', - 'PythonAgent': '@Python Agent', 'JavaAgent': '@Java Agent', 'JavaScriptAgent': '@JavaScript Agent', @@ -149,14 +143,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): "content-type": "application/json", "origin": cls.url, "pragma": "no-cache", + "priority": "u=1, i", "referer": cls.model_referers.get(model, cls.url), - "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"', + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", } if model in cls.model_prefixes: @@ -221,7 +216,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): else: full_response = "" search_results_json = "" - async for chunk in response.content.iter_any(): + async for chunk, _ in 
response.content.iter_chunks(): if chunk: decoded_chunk = chunk.decode() decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk) -- cgit v1.2.3 From c58335a4b33c9520208821a0c7c947542517e477 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 15:27:40 +0300 Subject: Updated(README.md) --- README.md | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/README.md b/README.md index 1b7aa87e..65d33d88 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,7 @@ from g4f.client import Client client = Client() response = client.chat.completions.create( model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Say this is a test"}], + messages=[{"role": "user", "content": "Hello"}], # Add any other necessary parameters ) print(response.choices[0].message.content) @@ -747,33 +747,6 @@ set G4F_PROXY=http://host:port - - - - VividNode (pyqt-openai) - - - - - Stars - - - - - Forks - - - - - Issues - - - - - Pull Requests - - - -- cgit v1.2.3 From b48757dd4faf0c5ecafa8feed01bc025e3230e12 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 15:29:45 +0300 Subject: Updated(README.md) --- README.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/README.md b/README.md index 65d33d88..17b2e03c 100644 --- a/README.md +++ b/README.md @@ -747,6 +747,33 @@ set G4F_PROXY=http://host:port + + + + VividNode (pyqt-openai) + + + + + Stars + + + + + Forks + + + + + Issues + + + + + Pull Requests + + + -- cgit v1.2.3 From aef705021f6e78d9d312bdb624af3728544ee0da Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 21:07:41 +0300 Subject: Updated(g4f/Provider/ChatifyAI.py) --- g4f/Provider/ChatifyAI.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py index a999afac..7e43b065 100644 --- a/g4f/Provider/ChatifyAI.py +++ b/g4f/Provider/ChatifyAI.py @@ -65,19 +65,15 @@ class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin): response.raise_for_status() response_text = await response.text() - # Фільтруємо та форматуємо відповідь filtered_response = cls.filter_response(response_text) yield filtered_response @staticmethod def filter_response(response_text: str) -> str: - # Розділяємо рядок на частини parts = response_text.split('"') - # Вибираємо лише текстові частини (кожна друга частина) text_parts = parts[1::2] - # Об'єднуємо текстові частини clean_text = ''.join(text_parts) return clean_text -- cgit v1.2.3 From b2b41884469b4bf3412637ffc2c23943c66e2b8f Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 15 Oct 2024 22:01:32 +0300 Subject: refactor(docs/async_client.md): use async_generate for image generation --- docs/async_client.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/async_client.md b/docs/async_client.md index a3f773fa..f5ac5392 100644 --- a/docs/async_client.md +++ b/docs/async_client.md @@ -187,7 +187,7 @@ async def main(): model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}], ) - task2 = client.images.generate( + task2 = client.images.async_generate( model="dall-e-3", prompt="a white siamese cat", ) -- cgit v1.2.3 From 552fcf11f52c981cd7bca2e1c3704361108965f2 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 16 Oct 2024 13:35:07 +0300 Subject: Updated(README.md) --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 17b2e03c..e283300c 100644 --- a/README.md +++ b/README.md @@ -126,13 +126,13 @@ By following 
these steps, you should be able to successfully install and run the Run the **Webview UI** on other Platforms: -- [/docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md) +- [docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md) ##### Use your smartphone: Run the Web UI on Your Smartphone: -- [/docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md) +- [docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md) #### Use python @@ -148,17 +148,17 @@ pip install -U g4f[all] ``` How do I install only parts or disable parts? -Use partial requirements: [/docs/requirements](https://github.com/xtekky/gpt4free/blob/main/docs/requirements.md) +Use partial requirements: [/docs/requirements](docs/requirements.md) ##### Install from source: How do I load the project using git and install the project requirements? -Read this tutorial and follow it step by step: [/docs/git](https://github.com/xtekky/gpt4free/blob/main/docs/git.md) +Read this tutorial and follow it step by step: [/docs/git](docs/git.md) ##### Install using Docker: How do I build and run the composer image from source? -Use docker-compose: [/docs/docker](https://github.com/xtekky/gpt4free/blob/main/docs/docker.md) +Use docker-compose: [/docs/docker](docs/docker.md) ## 💡 Usage @@ -200,9 +200,9 @@ print(f"Generated image URL: {image_url}") **Full Documentation for Python API** -- New AsyncClient API from G4F: [/docs/async_client](https://github.com/xtekky/gpt4free/blob/main/docs/async_client.md) -- Client API like the OpenAI Python library: [/docs/client](https://github.com/xtekky/gpt4free/blob/main/docs/client.md) -- Legacy API with python modules: [/docs/legacy](https://github.com/xtekky/gpt4free/blob/main/docs/legacy.md) +- New AsyncClient API from G4F: [/docs/async_client](docs/async_client.md) +- Client API like the OpenAI Python library: [/docs/client](docs/client.md) +- Legacy API with python modules: [/docs/legacy](docs/legacy.md) #### Web UI @@ -223,7 +223,7 @@ python -m g4f.cli gui -port 8080 -debug You can use the Interference API to serve other OpenAI integrations with G4F. -See docs: [/docs/interference](https://github.com/xtekky/gpt4free/blob/main/docs/interference.md) +See docs: [/docs/interference](docs/interference.md) Access with: http://localhost:1337/v1 @@ -783,11 +783,11 @@ We welcome contributions from the community. Whether you're adding new providers ###### Guide: How do I create a new Provider? -- Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md) +- Read: [/docs/guides/create_provider](docs/guides/create_provider.md) ###### Guide: How can AI help me with writing code? -- Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md) +- Read: [/docs/guides/help_me](docs/guides/help_me.md) ## 🙌 Contributors -- cgit v1.2.3 From a9fb30464fc69792d31ed93c7cd97a1637fbbdcf Mon Sep 17 00:00:00 2001 From: kqlio67 <166700875+kqlio67@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:14:32 +0000 Subject: Update README.md --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index e283300c..cf42d3df 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Written by [@xtekky](https://github.com/xtekky)
> [!IMPORTANT] -> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses. +> By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses. > [!WARNING] > _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balancing and flow control. @@ -126,13 +126,13 @@ By following these steps, you should be able to successfully install and run the Run the **Webview UI** on other Platforms: -- [docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md) +- [/docs/guides/webview](docs/webview.md) ##### Use your smartphone: Run the Web UI on Your Smartphone: -- [docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md) +- [/docs/guides/phone](/docs/guides/phone.md) #### Use python @@ -196,7 +196,7 @@ image_url = response.data[0].url print(f"Generated image URL: {image_url}") ``` -[![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md) +[![Image with cat](/docs/cat.jpeg)](docs/client.md) -- cgit v1.2.3 From 84714e07c90c383dd0df35a284d8f5dd26212a78 Mon Sep 17 00:00:00 2001 From: kqlio67 <166700875+kqlio67@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:21:37 +0000 Subject: Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cf42d3df..909c03dc 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,7 @@ Run the **Webview UI** on other Platforms: Run the Web UI on Your Smartphone: -- [/docs/guides/phone](/docs/guides/phone.md) +- [/docs/guides/phone](docs/guides/phone.md) -- cgit v1.2.3 From 03f2bc8e5bf8c732cc3faa0e9430b244ea360a1a Mon Sep 17 00:00:00 2001 From: kqlio67 <166700875+kqlio67@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:27:40 +0000 Subject: Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 909c03dc..83e65cf6 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ print(f"Generated image URL: {image_url}") **Full Documentation for Python API** -- New AsyncClient API from G4F: [/docs/async_client](docs/async_client.md) +- AsyncClient API from G4F: [/docs/async_client](docs/async_client.md) - Client API like the OpenAI Python library: [/docs/client](docs/client.md) - Legacy API with python modules: [/docs/legacy](docs/legacy.md) -- cgit v1.2.3 From 4e630d29aa2482cb77e5fa49f7f2c0b65a546544 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 16 Oct 2024 18:29:16 +0300 Subject: feat(g4f/Provider/Blackbox.py): enhance async response handling
and cleanup --- g4f/Provider/Blackbox.py | 320 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 221 insertions(+), 99 deletions(-) diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index a550c3b6..886bc2fa 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -1,19 +1,27 @@ from __future__ import annotations -import re +import asyncio +import aiohttp import random import string import json -from aiohttp import ClientSession +import uuid +import re +from typing import Optional, AsyncGenerator, Union + +from aiohttp import ClientSession, ClientResponseError -from ..typing import AsyncResult, Messages, ImageType -from ..image import ImageResponse, to_data_uri +from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse + class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): + label = "Blackbox AI" url = "https://www.blackbox.ai" api_endpoint = "https://www.blackbox.ai/api/chat" working = True + supports_gpt_4 = True supports_stream = True supports_system_message = True supports_message_history = True @@ -23,6 +31,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): models = [ default_model, 'blackboxai-pro', + *image_models, "llama-3.1-8b", 'llama-3.1-70b', 'llama-3.1-405b', @@ -43,7 +52,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'ReactAgent', 'XcodeAgent', 'AngularJSAgent', - *image_models, ] agentMode = { @@ -71,13 +79,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"}, 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"}, } - + userSelectedModel = { "gpt-4o": "gpt-4o", "gemini-pro": "gemini-pro", 'claude-sonnet-3.5': "claude-sonnet-3.5", } - + model_prefixes = { 'gpt-4o': '@GPT-4o', 'gemini-pro': '@Gemini-PRO', @@ -98,14 +106,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'blackboxai-pro': '@BLACKBOXAI-PRO', 'ImageGeneration': '@Image Generation', } - + model_referers = { - "blackboxai": f"{url}/?model=blackboxai", - "gpt-4o": f"{url}/?model=gpt-4o", - "gemini-pro": f"{url}/?model=gemini-pro", - "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5" + "blackboxai": "/?model=blackboxai", + "gpt-4o": "/?model=gpt-4o", + "gemini-pro": "/?model=gemini-pro", + "claude-sonnet-3.5": "/?model=claude-sonnet-3.5" } - + model_aliases = { "gemini-flash": "gemini-1.5-flash", "claude-3.5-sonnet": "claude-sonnet-3.5", @@ -116,69 +124,131 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): def get_model(cls, model: str) -> str: if model in cls.models: return model - elif model in cls.userSelectedModel: - return model elif model in cls.model_aliases: return cls.model_aliases[model] else: return cls.default_model + @staticmethod + def generate_random_string(length: int = 7) -> str: + characters = string.ascii_letters + string.digits + return ''.join(random.choices(characters, k=length)) + + @staticmethod + def generate_next_action() -> str: + return uuid.uuid4().hex + + @staticmethod + def generate_next_router_state_tree() -> str: + router_state = [ + "", + { + "children": [ + "(chat)", + { + "children": [ + "__PAGE__", + {} + ] + } + ] + }, + None, + None, + True + ] + return json.dumps(router_state) + + @staticmethod + def clean_response(text: str) -> str: + pattern = r'^\$\@\$v=undefined-rv1\$\@\$' + cleaned_text = re.sub(pattern, '', text) + return cleaned_text + @classmethod async def create_async_generator( cls, model: str, 
messages: Messages, - proxy: str = None, - image: ImageType = None, - image_name: str = None, - webSearchMode: bool = False, + proxy: Optional[str] = None, + web_search_mode: bool = False, **kwargs - ) -> AsyncResult: + ) -> AsyncGenerator[Union[str, ImageResponse], None]: + """ + Creates an asynchronous generator for streaming responses from Blackbox AI. + + Parameters: + model (str): Model to use for generating responses. + messages (Messages): Message history. + proxy (Optional[str]): Proxy URL, if needed. + web_search_mode (bool): Enables or disables web search mode. + **kwargs: Additional keyword arguments. + + Yields: + Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects. + """ model = cls.get_model(model) - - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": cls.model_referers.get(model, cls.url), - "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", - } - if model in cls.model_prefixes: - prefix = cls.model_prefixes[model] - if not messages[0]['content'].startswith(prefix): - messages[0]['content'] = f"{prefix} {messages[0]['content']}" + chat_id = cls.generate_random_string() + next_action = cls.generate_next_action() + next_router_state_tree = cls.generate_next_router_state_tree() + + agent_mode = cls.agentMode.get(model, {}) + trending_agent_mode = cls.trendingAgentMode.get(model, {}) + + prefix = cls.model_prefixes.get(model, "") - random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7)) - messages[-1]['id'] = random_id - messages[-1]['role'] = 'user' - - if image is not None: - messages[-1]['data'] = { - 'fileText': '', - 'imageBase64': to_data_uri(image), - 'title': image_name - } - messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content'] + formatted_prompt = "" + for message in messages: + role = message.get('role', '').capitalize() + content = message.get('content', '') + if role and content: + formatted_prompt += f"{role}: {content}\n" - data = { - "messages": messages, - "id": random_id, + if prefix: + formatted_prompt = f"{prefix} {formatted_prompt}".strip() + + referer_path = cls.model_referers.get(model, f"/?model={model}") + referer_url = f"{cls.url}{referer_path}" + + common_headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) ' + 'Chrome/129.0.0.0 Safari/537.36' + } + + headers_api_chat = { + 'Content-Type': 'application/json', + 'Referer': referer_url + } + headers_api_chat_combined = {**common_headers, **headers_api_chat} + + payload_api_chat = { + "messages": [ + { + "id": chat_id, + "content": formatted_prompt, + "role": "user" + } + ], + "id": chat_id, "previewToken": None, "userId": None, "codeModelMode": True, - "agentMode": 
{}, - "trendingAgentMode": {}, + "agentMode": agent_mode, + "trendingAgentMode": trending_agent_mode, "isMicMode": False, "userSystemPrompt": None, "maxTokens": 1024, @@ -191,47 +261,99 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): "clickedForceWebSearch": False, "visitFromDelta": False, "mobileClient": False, - "userSelectedModel": None, - "webSearchMode": webSearchMode, + "webSearchMode": web_search_mode, + "userSelectedModel": cls.userSelectedModel.get(model, model) } - if model in cls.agentMode: - data["agentMode"] = cls.agentMode[model] - elif model in cls.trendingAgentMode: - data["trendingAgentMode"] = cls.trendingAgentMode[model] - elif model in cls.userSelectedModel: - data["userSelectedModel"] = cls.userSelectedModel[model] - - async with ClientSession(headers=headers) as session: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - if model == 'ImageGeneration': - response_text = await response.text() - url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text) - if url_match: - image_url = url_match.group(0) - yield ImageResponse(image_url, alt=messages[-1]['content']) - else: - raise Exception("Image URL not found in the response") - else: - full_response = "" - search_results_json = "" - async for chunk, _ in response.content.iter_chunks(): - if chunk: - decoded_chunk = chunk.decode() - decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk) - if decoded_chunk.strip(): - if '$~~~$' in decoded_chunk: - search_results_json += decoded_chunk - else: - full_response += decoded_chunk - yield decoded_chunk - - if data["webSearchMode"] and search_results_json: - match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL) + headers_chat = { + 'Accept': 'text/x-component', + 'Content-Type': 'text/plain;charset=UTF-8', + 'Referer': f'{cls.url}/chat/{chat_id}?model={model}', + 'next-action': next_action, + 'next-router-state-tree': next_router_state_tree, + 'next-url': '/' + } + headers_chat_combined = {**common_headers, **headers_chat} + + data_chat = '[]' + + async with ClientSession(headers=common_headers) as session: + try: + async with session.post( + cls.api_endpoint, + headers=headers_api_chat_combined, + json=payload_api_chat, + proxy=proxy + ) as response_api_chat: + response_api_chat.raise_for_status() + text = await response_api_chat.text() + cleaned_response = cls.clean_response(text) + + if model in cls.image_models: + match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response) if match: - search_results = json.loads(match.group(1)) - formatted_results = "\n\n**Sources:**\n" - for i, result in enumerate(search_results[:5], 1): - formatted_results += f"{i}. [{result['title']}]({result['link']})\n" - yield formatted_results + image_url = match.group(1) + image_response = ImageResponse(images=image_url, alt="Generated Image") + yield image_response + else: + yield cleaned_response + else: + if web_search_mode: + match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL) + if match: + source_part = match.group(1).strip() + answer_part = cleaned_response[match.end():].strip() + try: + sources = json.loads(source_part) + source_formatted = "**Source:**\n" + for item in sources: + title = item.get('title', 'No Title') + link = item.get('link', '#') + position = item.get('position', '') + source_formatted += f"{position}. 
[{title}]({link})\n" + final_response = f"{answer_part}\n\n{source_formatted}" + except json.JSONDecodeError: + final_response = f"{answer_part}\n\nSource information is unavailable." + else: + final_response = cleaned_response + else: + if '$~~~$' in cleaned_response: + final_response = cleaned_response.split('$~~~$')[0].strip() + else: + final_response = cleaned_response + + yield final_response + except ClientResponseError as e: + error_text = f"Error {e.status}: {e.message}" + try: + error_response = await e.response.text() + cleaned_error = cls.clean_response(error_response) + error_text += f" - {cleaned_error}" + except Exception: + pass + yield error_text + except Exception as e: + yield f"Unexpected error during /api/chat request: {str(e)}" + + chat_url = f'{cls.url}/chat/{chat_id}?model={model}' + + try: + async with session.post( + chat_url, + headers=headers_chat_combined, + data=data_chat, + proxy=proxy + ) as response_chat: + response_chat.raise_for_status() + pass + except ClientResponseError as e: + error_text = f"Error {e.status}: {e.message}" + try: + error_response = await e.response.text() + cleaned_error = cls.clean_response(error_response) + error_text += f" - {cleaned_error}" + except Exception: + pass + yield error_text + except Exception as e: + yield f"Unexpected error during /chat/{chat_id} request: {str(e)}" -- cgit v1.2.3 From 1617e515bcec170717a782e20e9870f698fdab5d Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 16 Oct 2024 19:33:06 +0300 Subject: feat(g4f/Provider/HuggingChat.py): add new model support for Nemotron --- g4f/Provider/HuggingChat.py | 2 ++ g4f/models.py | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 30e97d7d..45f3a0d2 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -17,6 +17,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'Qwen/Qwen2.5-72B-Instruct', + 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', 'meta-llama/Llama-3.2-11B-Vision-Instruct', 'NousResearch/Hermes-3-Llama-3.1-8B', 'mistralai/Mistral-Nemo-Instruct-2407', @@ -27,6 +28,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B", "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407", diff --git a/g4f/models.py b/g4f/models.py index f124cf86..6505a77a 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -707,6 +707,13 @@ cybertron_7b = Model( best_provider = Cloudflare ) +### Nvidia ### +nemotron_70b = Model( + name = 'nemotron-70b', + base_provider = 'Nvidia', + best_provider = IterListProvider([HuggingChat, HuggingFace]) +) + ############# @@ -1070,6 +1077,10 @@ class ModelUtils: 'cybertron-7b': cybertron_7b, +### Nvidia ### +'nemotron-70b': nemotron_70b, + + ############# ### Image ### -- cgit v1.2.3 From 9f394f9613469c7ca56ae3cdc9a198f5c196fc13 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 16 Oct 2024 21:00:08 +0300 Subject: Added new provider (g4f/Provider/AiMathGPT.py) --- g4f/Provider/AiMathGPT.py | 78 +++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 1 + g4f/models.py | 3 +- 3 files changed, 81 insertions(+), 1 
deletion(-) create mode 100644 g4f/Provider/AiMathGPT.py diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py new file mode 100644 index 00000000..4399320a --- /dev/null +++ b/g4f/Provider/AiMathGPT.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://aimathgpt.forit.ai" + api_endpoint = "https://aimathgpt.forit.ai/api/ai" + working = True + supports_stream = False + supports_system_message = True + supports_message_history = True + + default_model = 'llama3' + models = ['llama3'] + + model_aliases = {"llama-3.1-70b": "llama3",} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': f'{cls.url}/', + 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": [ + { + "role": "system", + "content": "" + }, + { + "role": "user", + "content": format_prompt(messages) + } + ], + "model": model + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + filtered_response = response_data['result']['response'] + yield filtered_response diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 3d6539fc..c065135e 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -18,6 +18,7 @@ from .Allyfy import Allyfy from .AmigoChat import AmigoChat from .AiChatOnline import AiChatOnline from .AiChats import AiChats +from .AiMathGPT import AiMathGPT from .Airforce import Airforce from .Aura import Aura from .Bing import Bing diff --git a/g4f/models.py b/g4f/models.py index 6505a77a..0dd15cf5 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -6,6 +6,7 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( AIChatFree, + AiMathGPT, Airforce, Allyfy, AmigoChat, @@ -213,7 +214,7 @@ llama_3_1_8b = Model( llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, HuggingFace, PerplexityLabs]) + best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, HuggingFace, PerplexityLabs]) ) llama_3_1_405b = Model( -- cgit v1.2.3 From bbf41daf37f6a924b0f2977294afb26456c0dc68 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 16 Oct 2024 
21:45:37 +0300 Subject: Added new provider (g4f/Provider/Editee.py) --- g4f/Provider/Editee.py | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 1 + g4f/models.py | 14 +++++++-- 3 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 g4f/Provider/Editee.py diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py new file mode 100644 index 00000000..6d297169 --- /dev/null +++ b/g4f/Provider/Editee.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from aiohttp import ClientSession +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class Editee(AsyncGeneratorProvider, ProviderModelMixin): + label = "Editee" + url = "https://editee.com" + api_endpoint = "https://editee.com/submit/chatgptfree" + working = True + supports_gpt_4 = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'claude' + models = ['claude', 'gpt4', 'gemini', 'mistrallarge'] + + model_aliases = { + "claude-3.5-sonnet": "claude", + "gpt-4o": "gpt4", + "gemini-pro": "gemini", + "mistral-large": "mistrallarge", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "Accept": "application/json, text/plain, */*", + "Accept-Language": "en-US,en;q=0.9", + "Cache-Control": "no-cache", + "Content-Type": "application/json", + "Origin": cls.url, + "Pragma": "no-cache", + "Priority": "u=1, i", + "Referer": f"{cls.url}/chat-gpt", + "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"', + "Sec-CH-UA-Mobile": '?0', + "Sec-CH-UA-Platform": '"Linux"', + "Sec-Fetch-Dest": 'empty', + "Sec-Fetch-Mode": 'cors', + "Sec-Fetch-Site": 'same-origin', + "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + "X-Requested-With": 'XMLHttpRequest', + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + data = { + "user_input": prompt, + "context": " ", + "template_id": "", + "selected_model": model + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + yield response_data['text'] diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index c065135e..f3593476 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -38,6 +38,7 @@ from .DDG import DDG from .DeepInfra import DeepInfra from .DeepInfraChat import DeepInfraChat from .DeepInfraImage import DeepInfraImage +from .Editee import Editee from .FlowGpt import FlowGpt from .Free2GPT import Free2GPT from .FreeChatgpt import FreeChatgpt diff --git a/g4f/models.py b/g4f/models.py index 0dd15cf5..149e60d7 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -24,6 +24,7 @@ from .Provider import ( DeepInfra, DeepInfraChat, DeepInfraImage, + Editee, Free2GPT, FreeChatgpt, FreeGpt, @@ -128,7 +129,7 @@ gpt_35_turbo = Model( gpt_4o = Model( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider 
= IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, Liaobots, Airforce, OpenaiChat]) ) gpt_4o_mini = Model( @@ -288,6 +289,12 @@ mistral_nemo = Model( best_provider = IterListProvider([HuggingChat, HuggingFace]) ) +mistral_large = Model( + name = "mistral-large", + base_provider = "Mistral", + best_provider = Editee +) + ### NousResearch ### mixtral_8x7b_dpo = Model( @@ -333,7 +340,7 @@ phi_3_5_mini = Model( gemini_pro = Model( name = 'gemini-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Liaobots, Airforce]) + best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Editee, Liaobots, Airforce]) ) gemini_flash = Model( @@ -417,7 +424,7 @@ claude_3_haiku = Model( claude_3_5_sonnet = Model( name = 'claude-3.5-sonnet', base_provider = 'Anthropic', - best_provider = IterListProvider([Blackbox, Airforce, AmigoChat, Liaobots]) + best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, Liaobots]) ) @@ -917,6 +924,7 @@ class ModelUtils: 'mixtral-8x7b': mixtral_8x7b, 'mixtral-8x22b': mixtral_8x22b, 'mistral-nemo': mistral_nemo, +'mistral-large': mistral_large, ### NousResearch ### -- cgit v1.2.3 From da4b0831d9e3e941fa9e944519eb5e2bdd95bd1b Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 17 Oct 2024 13:13:29 +0300 Subject: Update (g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 886bc2fa..6c45a628 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -171,7 +171,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: Optional[str] = None, - web_search_mode: bool = False, + websearch: bool = False, **kwargs ) -> AsyncGenerator[Union[str, ImageResponse], None]: """ @@ -261,7 +261,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): "clickedForceWebSearch": False, "visitFromDelta": False, "mobileClient": False, - "webSearchMode": web_search_mode, + "webSearchMode": websearch, "userSelectedModel": cls.userSelectedModel.get(model, model) } -- cgit v1.2.3 From 48e8cbfb6d3f15e3739b8ef22086e37afc0c870b Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 17 Oct 2024 13:17:38 +0300 Subject: Added new provider (g4f/Provider/RubiksAI.py) --- g4f/Provider/RubiksAI.py | 163 +++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 1 + g4f/models.py | 5 +- 3 files changed, 167 insertions(+), 2 deletions(-) create mode 100644 g4f/Provider/RubiksAI.py diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py new file mode 100644 index 00000000..184322c8 --- /dev/null +++ b/g4f/Provider/RubiksAI.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +import asyncio +import aiohttp +import random +import string +import json +from urllib.parse import urlencode + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Rubiks AI" + url = "https://rubiks.ai" + api_endpoint = "https://rubiks.ai/search/api.php" + working = True + supports_gpt_4 = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-versatile' + models = [default_model, 
'gpt-4o-mini'] + + model_aliases = { + "llama-3.1-70b": "llama-3.1-70b-versatile", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @staticmethod + def generate_mid() -> str: + """ + Generates a 'mid' string following the pattern: + 6 characters - 4 characters - 4 characters - 4 characters - 12 characters + Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4 + """ + parts = [ + ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=12)) + ] + return '-'.join(parts) + + @staticmethod + def create_referer(q: str, mid: str, model: str = '') -> str: + """ + Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding. + """ + params = {'q': q, 'model': model, 'mid': mid} + encoded_params = urlencode(params) + return f'https://rubiks.ai/search/?{encoded_params}' + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + websearch: bool = False, + **kwargs + ) -> AsyncResult: + """ + Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response. + + Parameters: + - model (str): The model to use in the request. + - messages (Messages): The messages to send as a prompt. + - proxy (str, optional): Proxy URL, if needed. + - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False. 
+ """ + model = cls.get_model(model) + prompt = format_prompt(messages) + q_value = prompt + mid_value = cls.generate_mid() + referer = cls.create_referer(q=q_value, mid=mid_value, model=model) + + url = cls.api_endpoint + params = { + 'q': q_value, + 'model': model, + 'id': '', + 'mid': mid_value + } + + headers = { + 'Accept': 'text/event-stream', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Pragma': 'no-cache', + 'Referer': referer, + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + try: + timeout = aiohttp.ClientTimeout(total=None) + async with ClientSession(timeout=timeout) as session: + async with session.get(url, headers=headers, params=params, proxy=proxy) as response: + if response.status != 200: + yield f"Request ended with status code {response.status}" + return + + assistant_text = '' + sources = [] + + async for line in response.content: + decoded_line = line.decode('utf-8').strip() + if not decoded_line.startswith('data: '): + continue + data = decoded_line[6:] + if data in ('[DONE]', '{"done": ""}'): + break + try: + json_data = json.loads(data) + except json.JSONDecodeError: + continue + + if 'url' in json_data and 'title' in json_data: + if websearch: + sources.append({'title': json_data['title'], 'url': json_data['url']}) + + elif 'choices' in json_data: + for choice in json_data['choices']: + delta = choice.get('delta', {}) + content = delta.get('content', '') + role = delta.get('role', '') + if role == 'assistant': + continue + assistant_text += content + + if websearch and sources: + sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)]) + assistant_text += f"\n\n**Source:**\n{sources_text}" + + yield assistant_text + + except asyncio.CancelledError: + yield "The request was cancelled." 
+ except aiohttp.ClientError as e: + yield f"An error occurred during the request: {e}" + except Exception as e: + yield f"An unexpected error occurred: {e}" diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index f3593476..9caa92d3 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -63,6 +63,7 @@ from .Prodia import Prodia from .Reka import Reka from .Replicate import Replicate from .ReplicateHome import ReplicateHome +from .RubiksAI import RubiksAI from .TeachAnything import TeachAnything from .Upstage import Upstage from .WhiteRabbitNeo import WhiteRabbitNeo diff --git a/g4f/models.py b/g4f/models.py index 149e60d7..1e80a924 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -58,6 +58,7 @@ from .Provider import ( Reka, Replicate, ReplicateHome, + RubiksAI, TeachAnything, Upstage, ) @@ -135,7 +136,7 @@ gpt_4o = Model( gpt_4o_mini = Model( name = 'gpt-4o-mini', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt]) + best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt]) ) gpt_4_turbo = Model( @@ -215,7 +216,7 @@ llama_3_1_8b = Model( llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, HuggingFace, PerplexityLabs]) + best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, HuggingFace, PerplexityLabs]) ) llama_3_1_405b = Model( -- cgit v1.2.3 From 4cd1ed1a71ded59fb18d60a45c655e711200480a Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 17 Oct 2024 15:44:27 +0300 Subject: Added new provider (g4f/Provider/Ai4Chat.py) --- g4f/Provider/Ai4Chat.py | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 1 + g4f/models.py | 6 ++++- 3 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 g4f/Provider/Ai4Chat.py diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py new file mode 100644 index 00000000..81633b7a --- /dev/null +++ b/g4f/Provider/Ai4Chat.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import re + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.ai4chat.co" + api_endpoint = "https://www.ai4chat.co/generate-response" + working = True + supports_gpt_4 = False + supports_stream = False + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4' + + @classmethod + def get_model(cls, model: str) -> str: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'cookie': 'messageCount=2', + 'origin': 'https://www.ai4chat.co', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://www.ai4chat.co/gpt/talkdirtytome', + 'sec-ch-ua': '"Chromium";v="129", 
"Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + payload = { + "messages": [ + { + "role": "user", + "content": format_prompt(messages) + } + ] + } + + async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + message = response_data.get('message', '') + clean_message = re.sub('<[^<]+?>', '', message).strip() + yield clean_message diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 9caa92d3..c794dd0b 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -11,6 +11,7 @@ from .needs_auth import * from .nexra import * +from .Ai4Chat import Ai4Chat from .AI365VIP import AI365VIP from .AIChatFree import AIChatFree from .AIUncensored import AIUncensored diff --git a/g4f/models.py b/g4f/models.py index 1e80a924..e84f9103 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -5,6 +5,7 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( + Ai4Chat, AIChatFree, AiMathGPT, Airforce, @@ -104,6 +105,9 @@ default = Model( AmigoChat, ChatifyAI, Cloudflare, + Ai4Chat, + Editee, + AiMathGPT, ]) ) @@ -148,7 +152,7 @@ gpt_4_turbo = Model( gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Chatgpt4Online, Bing, OpenaiChat]) + best_provider = IterListProvider([NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Ai4Chat, Airforce, Chatgpt4Online, Bing, OpenaiChat]) ) # o1 -- cgit v1.2.3 From d9892cebfd6ccfbb0f0e231fa8302408bea7edca Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 17 Oct 2024 15:47:36 +0300 Subject: Update (g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 6c45a628..317df1d4 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -181,7 +181,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): model (str): Model to use for generating responses. messages (Messages): Message history. proxy (Optional[str]): Proxy URL, if needed. - web_search_mode (bool): Enables or disables web search mode. + websearch (bool): Enables or disables web search mode. **kwargs: Additional keyword arguments. Yields: @@ -298,7 +298,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): else: yield cleaned_response else: - if web_search_mode: + if websearch: match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL) if match: source_part = match.group(1).strip() -- cgit v1.2.3