author | zukixa <56563509+zukixa@users.noreply.github.com> | 2024-08-25 10:00:06 +0200
committer | zukixa <56563509+zukixa@users.noreply.github.com> | 2024-08-25 10:00:06 +0200
commit | a338ed588397737d3c1be66ee4f15d51c84f1770 (patch)
tree | 0de45ef57733bfb8c3c38e74dddabeb697ee445a /g4f/Provider
parent | . (diff)
Diffstat
-rw-r--r-- | g4f/Provider/Aura.py | 4
-rw-r--r-- | g4f/Provider/Blackbox.py | 2
-rw-r--r-- | g4f/Provider/ChatgptFree.py | 9
-rw-r--r-- | g4f/Provider/Feedough.py | 78
-rw-r--r-- | g4f/Provider/LiteIcoding.py | 19
-rw-r--r-- | g4f/Provider/MagickPenAsk.py | 2
-rw-r--r-- | g4f/Provider/MagickPenChat.py | 3
-rw-r--r-- | g4f/Provider/PerplexityLabs.py | 6
-rw-r--r-- | g4f/Provider/Rocks.py | 34
9 files changed, 53 insertions, 104 deletions
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 7e2b2831..4a8d0a55 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -33,8 +33,8 @@ class Aura(AsyncGeneratorProvider):
                     new_messages.append(message)
             data = {
                 "model": {
-                    "id": "openchat_v3.2_mistral",
-                    "name": "OpenChat Aura",
+                    "id": "openchat_3.6",
+                    "name": "OpenChat 3.6 (latest)",
                     "maxLength": 24576,
                     "tokenLimit": max_tokens
                 },
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index a86471f2..3ba7abf4 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -67,7 +67,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
            async with session.post(
                f"{cls.url}/api/chat", json=data, proxy=proxy
-            ) as response: # type: ClientResponse
+            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index b1e00a22..11b2e5f5 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import re
+import json
 
 from ..requests import StreamSession, raise_for_status
 from ..typing import Messages
@@ -74,6 +75,10 @@ class ChatgptFree(AsyncProvider):
                "message": prompt,
                "bot_id": "0"
            }
-            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+            async with session.get(f"{cls.url}/wp-admin/admin-ajax.php", params=data, cookies=cookies) as response:
                await raise_for_status(response)
-                return (await response.json())["data"]
\ No newline at end of file
+                full_answer = ""
+                for line in ((await response.text()).splitlines())[:-1]:
+                    if line.startswith("data:") and "[DONE]" not in line:
+                        full_answer += json.loads(line[5:])['choices'][0]['delta'].get('content', "")
+                return full_answer
\ No newline at end of file
diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
deleted file mode 100644
index d35e30ee..00000000
--- a/g4f/Provider/Feedough.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-import json
-import asyncio
-from aiohttp import ClientSession, TCPConnector
-from urllib.parse import urlencode
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.feedough.com"
-    api_endpoint = "/wp-admin/admin-ajax.php"
-    working = True
-    default_model = ''
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/ai-prompt-generator/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
-        }
-
-        connector = TCPConnector(ssl=False)
-
-        async with ClientSession(headers=headers, connector=connector) as session:
-            data = {
-                "action": "aixg_generate",
-                "prompt": format_prompt(messages),
-                "aixg_generate_nonce": "110c021031"
-            }
-
-            try:
-                async with session.post(
-                    f"{cls.url}{cls.api_endpoint}",
-                    data=urlencode(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_text = await response.text()
-                    try:
-                        response_json = json.loads(response_text)
-                        if response_json.get("success") and "data" in response_json:
-                            message = response_json["data"].get("message", "")
-                            yield message
-                    except json.JSONDecodeError:
-                        yield response_text
-            except Exception as e:
-                print(f"An error occurred: {e}")
-
-    @classmethod
-    async def run(cls, *args, **kwargs):
-        async for item in cls.create_async_generator(*args, **kwargs):
-            yield item
-
-        tasks = asyncio.all_tasks()
-        for task in tasks:
-            if not task.done():
-                await task
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
index 6aa407ca..ad04aceb 100644
--- a/g4f/Provider/LiteIcoding.py
+++ b/g4f/Provider/LiteIcoding.py
@@ -31,7 +31,7 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer null",
+            "Authorization": "Bearer b3b2712cf83640a5acfdc01e78369930",
            "Connection": "keep-alive",
            "Content-Type": "application/json;charset=utf-8",
            "DNT": "1",
@@ -74,6 +74,9 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                    response.raise_for_status()
                    buffer = ""
                    full_response = ""
+                    def decode_content(data):
+                        bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
+                        return bytes_array.decode('utf-8')
                    async for chunk in response.content.iter_any():
                        if chunk:
                            buffer += chunk.decode()
@@ -83,9 +86,17 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                                content = part[6:].strip()
                                if content and content != "[DONE]":
                                    content = content.strip('"')
-                                    full_response += content
-
-                full_response = full_response.replace('" "', ' ')
+                                    # Decoding each content block
+                                    decoded_content = decode_content(content)
+                                    full_response += decoded_content
+                full_response = (
+                    full_response.replace('""', '')  # Handle double quotes
+                    .replace('" "', ' ')  # Handle space within quotes
+                    .replace("\\n\\n", "\n\n")
+                    .replace("\\n", "\n")
+                    .replace('\\"', '"')
+                    .strip()
+                )
                yield full_response.strip()
 
            except ClientResponseError as e:
diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
index 8b7473d8..21a9e422 100644
--- a/g4f/Provider/MagickPenAsk.py
+++ b/g4f/Provider/MagickPenAsk.py
@@ -37,7 +37,7 @@ class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            'X-API-Secret': 'WCASR6ZQJYM85DVDX7'
+            'X-API-Secret': 'W252GY255JVYBS9NAM' # this for some reason is just hardcoded in the .js, it makes no sense
        }
        async with ClientSession(headers=headers) as session:
            data = {
diff --git a/g4f/Provider/MagickPenChat.py b/g4f/Provider/MagickPenChat.py
index 6c30028a..40ef32c1 100644
--- a/g4f/Provider/MagickPenChat.py
+++ b/g4f/Provider/MagickPenChat.py
@@ -37,7 +37,8 @@ class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            'X-Api-Secret': 'W252GY255JVYBS9NAM'
        }
        async with ClientSession(headers=headers) as session:
            data = {
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 7f4587e1..3656a39b 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "mixtral-8x7b-instruct"
+    default_model = "llama-3.1-8b-instruct"
     models = [
         "llama-3.1-sonar-large-128k-online",
         "llama-3.1-sonar-small-128k-online",
@@ -21,10 +21,6 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-sonar-small-128k-chat",
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
-        "gemma-2-9b-it",
-        "gemma-2-27b-it",
-        "nemotron-4-340b-instruct",
-        "mixtral-8x7b-instruct"
     ]
 
     @classmethod
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
index 8465a6c0..f44e0060 100644
--- a/g4f/Provider/Rocks.py
+++ b/g4f/Provider/Rocks.py
@@ -1,14 +1,17 @@
+import asyncio
 import json
 from aiohttp import ClientSession
-
 from ..typing import Messages, AsyncResult
 from .base_provider import AsyncGeneratorProvider
 
 class Rocks(AsyncGeneratorProvider):
-    url = "https://api.discord.rocks"
+    url = "https://api.airforce"
     api_endpoint = "/chat/completions"
-    supports_message_history = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
     working = True
 
     @classmethod
@@ -25,12 +28,13 @@ class Rocks(AsyncGeneratorProvider):
            "Accept": "application/json",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "en-US,en;q=0.9",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/en",
+            "Authorization": "Bearer missing api key",
+            "Origin": "https://llmplayground.net",
+            "Referer": "https://llmplayground.net/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
        }
 
        async with ClientSession() as session:
@@ -41,16 +45,26 @@ class Rocks(AsyncGeneratorProvider):
                headers=headers
            ) as response:
                response.raise_for_status()
+                last_chunk_time = asyncio.get_event_loop().time()
+
                async for line in response.content:
-                    if line.startswith(b"data: "):
+                    current_time = asyncio.get_event_loop().time()
+                    if current_time - last_chunk_time > 5:
+                        return
+
+                    if line.startswith(b"\n"):
+                        pass
+                    elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
+                        pass # trolled
+                    elif line.startswith(b"data: "):
                        try:
                            line = json.loads(line[6:])
-                        except:
+                        except json.JSONDecodeError:
                            continue
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
-                    elif line.startswith(b"\n"):
-                        pass
+                        last_chunk_time = current_time
                    else:
                        raise Exception(f"Unexpected line: {line}")
+                    return
\ No newline at end of file