author    | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-10-17 17:56:51 +0200
committer | GitHub <noreply@github.com> | 2024-10-17 17:56:51 +0200
commit    | 66a305998d47e724efaea696bf352428cfcd8291 (patch)
tree      | e372492a190abfa1254e6b05afea8d154aa48225 /g4f/Provider
parent    | Merge pull request #2275 from hansipie/setollamahost (diff)
parent    | Update (g4f/Provider/Blackbox.py) (diff)
Diffstat (limited to '')
-rw-r--r-- | g4f/Provider/Ai4Chat.py     |  70
-rw-r--r-- | g4f/Provider/AiMathGPT.py   |  78
-rw-r--r-- | g4f/Provider/Blackbox.py    | 325
-rw-r--r-- | g4f/Provider/ChatifyAI.py   |   4
-rw-r--r-- | g4f/Provider/Editee.py      |  78
-rw-r--r-- | g4f/Provider/HuggingChat.py |   2
-rw-r--r-- | g4f/Provider/RubiksAI.py    | 163
-rw-r--r-- | g4f/Provider/__init__.py    |   4
8 files changed, 616 insertions(+), 108 deletions(-)
diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py
new file mode 100644
index 00000000..81633b7a
--- /dev/null
+++ b/g4f/Provider/Ai4Chat.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import re
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://www.ai4chat.co"
+    api_endpoint = "https://www.ai4chat.co/generate-response"
+    working = True
+    supports_gpt_4 = False
+    supports_stream = False
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'gpt-4'
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'cookie': 'messageCount=2',
+            'origin': 'https://www.ai4chat.co',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://www.ai4chat.co/gpt/talkdirtytome',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        async with ClientSession(headers=headers) as session:
+            payload = {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": format_prompt(messages)
+                    }
+                ]
+            }
+
+            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                message = response_data.get('message', '')
+                clean_message = re.sub('<[^<]+?>', '', message).strip()
+                yield clean_message
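
A minimal usage sketch (not part of the commit; the module path is assumed from the imports above). Ai4Chat.get_model() pins every request to the default 'gpt-4', and the endpoint does not stream, so the generator yields a single cleaned string:

import asyncio
from g4f.Provider import Ai4Chat

async def main():
    # format_prompt() flattens the whole history into one user message,
    # so a single-element list is enough for a quick check.
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    async for chunk in Ai4Chat.create_async_generator(model="gpt-4", messages=messages):
        print(chunk)

asyncio.run(main())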
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
new file mode 100644
index 00000000..4399320a
--- /dev/null
+++ b/g4f/Provider/AiMathGPT.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://aimathgpt.forit.ai"
+    api_endpoint = "https://aimathgpt.forit.ai/api/ai"
+    working = True
+    supports_stream = False
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'llama3'
+    models = ['llama3']
+
+    model_aliases = {"llama-3.1-70b": "llama3",}
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': f'{cls.url}/',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": ""
+                    },
+                    {
+                        "role": "user",
+                        "content": format_prompt(messages)
+                    }
+                ],
+                "model": model
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                filtered_response = response_data['result']['response']
+                yield filtered_response
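
For reference, alias resolution in the new provider (illustrative calls only, assuming the class above is importable):

from g4f.Provider import AiMathGPT

print(AiMathGPT.get_model('llama3'))         # listed model  -> 'llama3'
print(AiMathGPT.get_model('llama-3.1-70b'))  # alias         -> 'llama3'
print(AiMathGPT.get_model('gpt-4o'))         # unknown model -> default 'llama3'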
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 250ffe48..317df1d4 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,19 +1,27 @@
 from __future__ import annotations
 
-import re
+import asyncio
+import aiohttp
 import random
 import string
 import json
-from aiohttp import ClientSession
+import uuid
+import re
+from typing import Optional, AsyncGenerator, Union
+
+from aiohttp import ClientSession, ClientResponseError
 
-from ..typing import AsyncResult, Messages, ImageType
-from ..image import ImageResponse, to_data_uri
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
 
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
     working = True
+    supports_gpt_4 = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
@@ -23,18 +31,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     models = [
         default_model,
         'blackboxai-pro',
-
+        *image_models,
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
-
         'gpt-4o',
-
         'gemini-pro',
         'gemini-1.5-flash',
-
         'claude-sonnet-3.5',
-
         'PythonAgent',
         'JavaAgent',
         'JavaScriptAgent',
@@ -48,7 +52,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'ReactAgent',
         'XcodeAgent',
         'AngularJSAgent',
-        *image_models,
     ]
 
     agentMode = {
@@ -76,18 +79,17 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
         'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
     }
-    
+
     userSelectedModel = {
        "gpt-4o": "gpt-4o",
        "gemini-pro": "gemini-pro",
        'claude-sonnet-3.5': "claude-sonnet-3.5",
     }
-    
+
     model_prefixes = {
        'gpt-4o': '@GPT-4o',
        'gemini-pro': '@Gemini-PRO',
        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-       
        'PythonAgent': '@Python Agent',
        'JavaAgent': '@Java Agent',
        'JavaScriptAgent': '@JavaScript Agent',
@@ -104,14 +106,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
        'blackboxai-pro': '@BLACKBOXAI-PRO',
        'ImageGeneration': '@Image Generation',
     }
-    
+
     model_referers = {
-        "blackboxai": f"{url}/?model=blackboxai",
-        "gpt-4o": f"{url}/?model=gpt-4o",
-        "gemini-pro": f"{url}/?model=gemini-pro",
-        "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
     }
-    
+
     model_aliases = {
         "gemini-flash": "gemini-1.5-flash",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
@@ -122,68 +124,131 @@ def get_model(cls, model: str) -> str:
         if model in cls.models:
             return model
-        elif model in cls.userSelectedModel:
-            return model
         elif model in cls.model_aliases:
             return cls.model_aliases[model]
         else:
             return cls.default_model
 
+    @staticmethod
+    def generate_random_string(length: int = 7) -> str:
+        characters = string.ascii_letters + string.digits
+        return ''.join(random.choices(characters, k=length))
+
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
+    @staticmethod
+    def clean_response(text: str) -> str:
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+        cleaned_text = re.sub(pattern, '', text)
+        return cleaned_text
+
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        proxy: str = None,
-        image: ImageType = None,
-        image_name: str = None,
-        webSearchMode: bool = False,
+        proxy: Optional[str] = None,
+        websearch: bool = False,
         **kwargs
-    ) -> AsyncResult:
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (Messages): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
         model = cls.get_model(model)
-
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "pragma": "no-cache",
-            "referer": cls.model_referers.get(model, cls.url),
-            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
-        }
 
-        if model in cls.model_prefixes:
-            prefix = cls.model_prefixes[model]
-            if not messages[0]['content'].startswith(prefix):
-                messages[0]['content'] = f"{prefix} {messages[0]['content']}"
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
 
-        random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
-        messages[-1]['id'] = random_id
-        messages[-1]['role'] = 'user'
-
-        if image is not None:
-            messages[-1]['data'] = {
-                'fileText': '',
-                'imageBase64': to_data_uri(image),
-                'title': image_name
-            }
-            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
 
-        data = {
-            "messages": messages,
-            "id": random_id,
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
             "previewToken": None,
             "userId": None,
             "codeModelMode": True,
-            "agentMode": {},
-            "trendingAgentMode": {},
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
             "maxTokens": 1024,
@@ -196,47 +261,99 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "clickedForceWebSearch": False,
             "visitFromDelta": False,
             "mobileClient": False,
-            "userSelectedModel": None,
-            "webSearchMode": webSearchMode,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        if model in cls.agentMode:
-            data["agentMode"] = cls.agentMode[model]
-        elif model in cls.trendingAgentMode:
-            data["trendingAgentMode"] = cls.trendingAgentMode[model]
-        elif model in cls.userSelectedModel:
-            data["userSelectedModel"] = cls.userSelectedModel[model]
-
-        async with ClientSession(headers=headers) as session:
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                if model == 'ImageGeneration':
-                    response_text = await response.text()
-                    url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
-                    if url_match:
-                        image_url = url_match.group(0)
-                        yield ImageResponse(image_url, alt=messages[-1]['content'])
-                    else:
-                        raise Exception("Image URL not found in the response")
-                else:
-                    full_response = ""
-                    search_results_json = ""
-                    async for chunk in response.content.iter_any():
-                        if chunk:
-                            decoded_chunk = chunk.decode()
-                            decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                            if decoded_chunk.strip():
-                                if '$~~~$' in decoded_chunk:
-                                    search_results_json += decoded_chunk
-                                else:
-                                    full_response += decoded_chunk
-                                    yield decoded_chunk
-
-                    if data["webSearchMode"] and search_results_json:
-                        match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
-                            search_results = json.loads(match.group(1))
-                            formatted_results = "\n\n**Sources:**\n"
-                            for i, result in enumerate(search_results[:5], 1):
-                                formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
-                            yield formatted_results
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
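
The rewrite above fabricates the request metadata that Blackbox's Next.js frontend would normally supply. A standalone sketch of what each new helper produces, using only the stdlib calls shown in the diff (example values are illustrative):

import json
import random
import re
import string
import uuid

# generate_random_string(): a 7-character alphanumeric chat id, e.g. 'a3F9kQz'
chat_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))

# generate_next_action(): a 32-hex-digit token sent as the 'next-action' header
next_action = uuid.uuid4().hex

# generate_next_router_state_tree(): JSON for the 'next-router-state-tree' header
router_state = ["", {"children": ["(chat)", {"children": ["__PAGE__", {}]}]}, None, None, True]
print(json.dumps(router_state))

# clean_response(): strips the '$@$v=undefined-rv1$@$' marker Blackbox prepends
sample = '$@$v=undefined-rv1$@$Hello there'
print(re.sub(r'^\$\@\$v=undefined-rv1\$\@\$', '', sample))  # -> 'Hello there'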
diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py
index a999afac..7e43b065 100644
--- a/g4f/Provider/ChatifyAI.py
+++ b/g4f/Provider/ChatifyAI.py
@@ -65,19 +65,15 @@ class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin):
                 response.raise_for_status()
                 response_text = await response.text()
 
-                # Filter and format the response
                 filtered_response = cls.filter_response(response_text)
 
                 yield filtered_response
 
     @staticmethod
     def filter_response(response_text: str) -> str:
-        # Split the string into parts
         parts = response_text.split('"')
 
-        # Select only the text parts (every second part)
        text_parts = parts[1::2]
 
-        # Join the text parts
        clean_text = ''.join(text_parts)
        return clean_text
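
The removed comments (translated from Ukrainian above) documented the quote-splitting trick in filter_response: every odd-indexed element of str.split('"') is a segment that sat inside double quotes. A standalone illustration (the input string is made up):

response_text = '"Hel" "lo, " "wor" "ld!"'
parts = response_text.split('"')
text_parts = parts[1::2]        # odd indices = quoted segments
print(''.join(text_parts))      # -> 'Hello, world!'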
diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py
new file mode 100644
index 00000000..6d297169
--- /dev/null
+++ b/g4f/Provider/Editee.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Editee(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Editee"
+    url = "https://editee.com"
+    api_endpoint = "https://editee.com/submit/chatgptfree"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'claude'
+    models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
+
+    model_aliases = {
+        "claude-3.5-sonnet": "claude",
+        "gpt-4o": "gpt4",
+        "gemini-pro": "gemini",
+        "mistral-large": "mistrallarge",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Cache-Control": "no-cache",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Pragma": "no-cache",
+            "Priority": "u=1, i",
+            "Referer": f"{cls.url}/chat-gpt",
+            "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
+            "Sec-CH-UA-Mobile": '?0',
+            "Sec-CH-UA-Platform": '"Linux"',
+            "Sec-Fetch-Dest": 'empty',
+            "Sec-Fetch-Mode": 'cors',
+            "Sec-Fetch-Site": 'same-origin',
+            "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+            "X-Requested-With": 'XMLHttpRequest',
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "user_input": prompt,
+                "context": " ",
+                "template_id": "",
+                "selected_model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                yield response_data['text']
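
Note the comma between 'gemini' and 'mistrallarge' in the models list: without it, Python's implicit string concatenation silently fuses the two names into one bogus entry 'geminimistrallarge'. With the list intact, alias resolution works as intended (illustrative calls, assuming the class above):

from g4f.Provider import Editee

print(Editee.get_model('claude'))          # listed model -> 'claude'
print(Editee.get_model('gemini-pro'))      # alias        -> 'gemini'
print(Editee.get_model('mistral-large'))   # alias        -> 'mistrallarge'
print(Editee.get_model('something-else'))  # fallback     -> default 'claude'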
"claude-3.5-sonnet": "claude", + "gpt-4o": "gpt4", + "gemini-pro": "gemini", + "mistral-large": "mistrallarge", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "Accept": "application/json, text/plain, */*", + "Accept-Language": "en-US,en;q=0.9", + "Cache-Control": "no-cache", + "Content-Type": "application/json", + "Origin": cls.url, + "Pragma": "no-cache", + "Priority": "u=1, i", + "Referer": f"{cls.url}/chat-gpt", + "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"', + "Sec-CH-UA-Mobile": '?0', + "Sec-CH-UA-Platform": '"Linux"', + "Sec-Fetch-Dest": 'empty', + "Sec-Fetch-Mode": 'cors', + "Sec-Fetch-Site": 'same-origin', + "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + "X-Requested-With": 'XMLHttpRequest', + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + data = { + "user_input": prompt, + "context": " ", + "template_id": "", + "selected_model": model + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_data = await response.json() + yield response_data['text'] diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 30e97d7d..45f3a0d2 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -17,6 +17,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'Qwen/Qwen2.5-72B-Instruct', + 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', 'meta-llama/Llama-3.2-11B-Vision-Instruct', 'NousResearch/Hermes-3-Llama-3.1-8B', 'mistralai/Mistral-Nemo-Instruct-2407', @@ -27,6 +28,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B", "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407", diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py new file mode 100644 index 00000000..184322c8 --- /dev/null +++ b/g4f/Provider/RubiksAI.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +import asyncio +import aiohttp +import random +import string +import json +from urllib.parse import urlencode + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Rubiks AI" + url = "https://rubiks.ai" + api_endpoint = "https://rubiks.ai/search/api.php" + working = True + supports_gpt_4 = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-versatile' + models = [default_model, 'gpt-4o-mini'] + + model_aliases = { + "llama-3.1-70b": "llama-3.1-70b-versatile", + } + + @classmethod + def get_model(cls, model: str) -> 
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
new file mode 100644
index 00000000..184322c8
--- /dev/null
+++ b/g4f/Provider/RubiksAI.py
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+import asyncio
+import aiohttp
+import random
+import string
+import json
+from urllib.parse import urlencode
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Rubiks AI"
+    url = "https://rubiks.ai"
+    api_endpoint = "https://rubiks.ai/search/api.php"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'llama-3.1-70b-versatile'
+    models = [default_model, 'gpt-4o-mini']
+
+    model_aliases = {
+        "llama-3.1-70b": "llama-3.1-70b-versatile",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @staticmethod
+    def generate_mid() -> str:
+        """
+        Generates a 'mid' string following the pattern:
+        6 characters - 4 characters - 4 characters - 4 characters - 12 characters
+        Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
+        """
+        parts = [
+            ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)),
+            ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+            ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+            ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+            ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+        ]
+        return '-'.join(parts)
+
+    @staticmethod
+    def create_referer(q: str, mid: str, model: str = '') -> str:
+        """
+        Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding.
+        """
+        params = {'q': q, 'model': model, 'mid': mid}
+        encoded_params = urlencode(params)
+        return f'https://rubiks.ai/search/?{encoded_params}'
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        """
+        Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response.
+
+        Parameters:
+        - model (str): The model to use in the request.
+        - messages (Messages): The messages to send as a prompt.
+        - proxy (str, optional): Proxy URL, if needed.
+        - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
+        """
+        model = cls.get_model(model)
+        prompt = format_prompt(messages)
+        q_value = prompt
+        mid_value = cls.generate_mid()
+        referer = cls.create_referer(q=q_value, mid=mid_value, model=model)
+
+        url = cls.api_endpoint
+        params = {
+            'q': q_value,
+            'model': model,
+            'id': '',
+            'mid': mid_value
+        }
+
+        headers = {
+            'Accept': 'text/event-stream',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Pragma': 'no-cache',
+            'Referer': referer,
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"'
+        }
+
+        try:
+            timeout = aiohttp.ClientTimeout(total=None)
+            async with ClientSession(timeout=timeout) as session:
+                async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
+                    if response.status != 200:
+                        yield f"Request ended with status code {response.status}"
+                        return
+
+                    assistant_text = ''
+                    sources = []
+
+                    async for line in response.content:
+                        decoded_line = line.decode('utf-8').strip()
+                        if not decoded_line.startswith('data: '):
+                            continue
+                        data = decoded_line[6:]
+                        if data in ('[DONE]', '{"done": ""}'):
+                            break
+                        try:
+                            json_data = json.loads(data)
+                        except json.JSONDecodeError:
+                            continue
+
+                        if 'url' in json_data and 'title' in json_data:
+                            if websearch:
+                                sources.append({'title': json_data['title'], 'url': json_data['url']})
+
+                        elif 'choices' in json_data:
+                            for choice in json_data['choices']:
+                                delta = choice.get('delta', {})
+                                content = delta.get('content', '')
+                                role = delta.get('role', '')
+                                if role == 'assistant':
+                                    continue
+                                assistant_text += content
+
+                    if websearch and sources:
+                        sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
+                        assistant_text += f"\n\n**Source:**\n{sources_text}"
+
+                    yield assistant_text
+
+        except asyncio.CancelledError:
+            yield "The request was cancelled."
+        except aiohttp.ClientError as e:
+            yield f"An error occurred during the request: {e}"
+        except Exception as e:
+            yield f"An unexpected error occurred: {e}"
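
A short illustration (not part of the commit) of the request identity RubiksAI fabricates, assuming the class above is importable; printed values are examples only:

from g4f.Provider import RubiksAI

mid = RubiksAI.generate_mid()
print(mid)  # e.g. '0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4'

# urlencode() percent-escapes the query so it embeds safely in the URL.
print(RubiksAI.create_referer(q='What is 2+2?', mid=mid, model='gpt-4o-mini'))
# e.g. 'https://rubiks.ai/search/?q=What+is+2%2B2%3F&model=gpt-4o-mini&mid=0r7v7b-...'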
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 3d6539fc..c794dd0b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,6 +11,7 @@ from .needs_auth import *
 from .nexra import *
 
+from .Ai4Chat import Ai4Chat
 from .AI365VIP import AI365VIP
 from .AIChatFree import AIChatFree
 from .AIUncensored import AIUncensored
@@ -18,6 +19,7 @@ from .Allyfy import Allyfy
 from .AmigoChat import AmigoChat
 from .AiChatOnline import AiChatOnline
 from .AiChats import AiChats
+from .AiMathGPT import AiMathGPT
 from .Airforce import Airforce
 from .Aura import Aura
 from .Bing import Bing
@@ -37,6 +39,7 @@ from .DDG import DDG
 from .DeepInfra import DeepInfra
 from .DeepInfraChat import DeepInfraChat
 from .DeepInfraImage import DeepInfraImage
+from .Editee import Editee
 from .FlowGpt import FlowGpt
 from .Free2GPT import Free2GPT
 from .FreeChatgpt import FreeChatgpt
@@ -61,6 +64,7 @@ from .Prodia import Prodia
 from .Reka import Reka
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
+from .RubiksAI import RubiksAI
 from .TeachAnything import TeachAnything
 from .Upstage import Upstage
 from .WhiteRabbitNeo import WhiteRabbitNeo
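
Taken together, the commit registers four new providers (Ai4Chat, AiMathGPT, Editee, RubiksAI) in the package. A hedged end-to-end sketch, assuming the g4f.ChatCompletion entry point from this era of the codebase:

import g4f
from g4f.Provider import RubiksAI

# The provider is passed explicitly; 'llama-3.1-70b' resolves through
# RubiksAI.model_aliases to 'llama-3.1-70b-versatile'.
response = g4f.ChatCompletion.create(
    model='llama-3.1-70b',
    provider=RubiksAI,
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)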