-rw-r--r-- | etc/testing/test_all.py             | 67 |
-rw-r--r-- | etc/testing/test_chat_completion.py |  8 |
-rw-r--r-- | g4f/Provider/Aivvm.py               | 59 |
-rw-r--r-- | g4f/Provider/DeepAi.py              |  5 |
4 files changed, 107 insertions, 32 deletions
diff --git a/etc/testing/test_all.py b/etc/testing/test_all.py
new file mode 100644
index 00000000..73134e3f
--- /dev/null
+++ b/etc/testing/test_all.py
@@ -0,0 +1,67 @@
+import asyncio
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+
+async def test(model: g4f.Model):
+    try:
+        try:
+            for response in g4f.ChatCompletion.create(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+        except:
+            for response in await g4f.ChatCompletion.create_async(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+
+        return True
+    except Exception as e:
+        print(model.name, "not working:", e)
+        print(e.__traceback__.tb_next)
+        return False
+
+
+async def start_test():
+    models_to_test = [
+        # GPT-3.5 4K Context
+        g4f.models.gpt_35_turbo,
+        g4f.models.gpt_35_turbo_0613,
+
+        # GPT-3.5 16K Context
+        g4f.models.gpt_35_turbo_16k,
+        g4f.models.gpt_35_turbo_16k_0613,
+
+        # GPT-4 8K Context
+        g4f.models.gpt_4,
+        g4f.models.gpt_4_0613,
+
+        # GPT-4 32K Context
+        g4f.models.gpt_4_32k,
+        g4f.models.gpt_4_32k_0613,
+    ]
+
+    models_working = []
+
+    for model in models_to_test:
+        if await test(model):
+            models_working.append(model.name)
+
+    print("working models:", models_working)
+
+
+asyncio.run(start_test())
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index ee523b86..7058ab4c 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -7,10 +7,10 @@ import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
     stream=True
 ):
     print(response, end="", flush=True)
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 3e4bbaeb..ac15ac16 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -1,8 +1,9 @@
 from __future__ import annotations
+import requests
 
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncResult, Messages
+from .base_provider import BaseProvider
+from ..typing import CreateResult
+from json import dumps
 
 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -16,22 +17,20 @@ models = {
     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }
 
-class Aivvm(AsyncGeneratorProvider):
+class Aivvm(BaseProvider):
     url                   = 'https://chat.aivvm.com'
+    supports_stream       = True
+    working               = True
     supports_gpt_35_turbo = True
     supports_gpt_4        = True
-    working               = True
 
     @classmethod
-    async def create_async_generator(
-        cls,
+    def create_completion(cls,
         model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
         stream: bool,
-        proxy: str = None,
-        timeout: int = 120,
         **kwargs
-    ) -> AsyncResult:
+    ) -> CreateResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:
@@ -44,24 +43,30 @@ class Aivvm(AsyncGeneratorProvider):
             "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
             "temperature" : kwargs.get("temperature", 0.7)
         }
+
         headers = {
-            "Accept": "text/event-stream",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/",
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en-US,en;q=0.9",
+            "content-type"      : "application/json",
+            "content-length"    : str(len(dumps(json_data))),
+            "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "sec-gpc"           : "1",
+            "referrer"          : "https://chat.aivvm.com/"
         }
-        async with StreamSession(
-            impersonate="chrome117",
-            headers=headers,
-            proxies={"https": proxy},
-            timeout=timeout
-        ) as session:
-            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    if b'Access denied | chat.aivvm.com used Cloudflare' in chunk:
-                        raise ValueError("Rate Limit | use another provider")
-
-                    yield chunk.decode()
+
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+        response.raise_for_status()
+
+        for chunk in response.iter_content():
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")
 
     @classmethod
     @property
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index e1ee4446..abc2644a 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -65,7 +65,10 @@ f = function () {
                 response.raise_for_status()
                 async for stream in response.content.iter_any():
                     if stream:
-                        yield stream.decode()
+                        try:
+                            yield stream.decode("utf-8")
+                        except UnicodeDecodeError:
+                            yield stream.decode("unicode-escape")
 
 
 def get_api_key(user_agent: str):
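Note on the decoding change shared by Aivvm and DeepAi: both providers stream raw byte chunks whose boundaries can fall inside a multi-byte UTF-8 sequence (requests' iter_content() even defaults to chunk_size=1, i.e. single bytes), so a bare chunk.decode("utf-8") can raise UnicodeDecodeError mid-stream. The commit falls back to "unicode-escape" rather than dropping the chunk. A minimal standalone sketch of the same pattern; the helper name decode_chunk is illustrative, not part of the commit:

    def decode_chunk(chunk: bytes) -> str:
        try:
            # Happy path: the chunk contains only complete UTF-8 sequences.
            return chunk.decode("utf-8")
        except UnicodeDecodeError:
            # A multi-byte character was split across chunk boundaries;
            # decode byte-for-byte instead of aborting the stream.
            return chunk.decode("unicode-escape")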
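Since create_completion is now a plain synchronous generator, the reworked Aivvm provider can also be exercised without the g4f.ChatCompletion wrapper for quick manual checks. A sketch, assuming the package layout shown in this diff:

    from g4f.Provider.Aivvm import Aivvm

    # create_completion yields decoded text chunks as the response streams in.
    for chunk in Aivvm.create_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    ):
        print(chunk, end="", flush=True)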