author    | H Lohaus <hlohaus@users.noreply.github.com> | 2024-03-11 07:47:35 +0100
committer | GitHub <noreply@github.com> | 2024-03-11 07:47:35 +0100
commit    | 0b850ac9fcabc52fecdeea8d56e620e44b35d180 (patch)
tree      | f92578686f34476be49422889d4587d8f3cf2fa6 /g4f/Provider/You.py
parent    | Merge pull request #1670 from xtekky/xtekky-patch-1 (diff)
parent    | Fix HuggingChat and PerplexityLabs and add HuggingFace provider (diff)
Diffstat (limited to 'g4f/Provider/You.py')
-rw-r--r-- | g4f/Provider/You.py | 41
1 file changed, 34 insertions, 7 deletions
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index b21fd582..1fdaf06d 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -1,21 +1,37 @@
 from __future__ import annotations
 
+import re
 import json
 import base64
 import uuid
 
 from aiohttp import ClientSession, FormData, BaseConnector
 
 from ..typing import AsyncResult, Messages, ImageType, Cookies
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..providers.helper import get_connector, format_prompt
-from ..image import to_bytes
+from ..image import to_bytes, ImageResponse
 from ..requests.defaults import DEFAULT_HEADERS
 
-class You(AsyncGeneratorProvider):
+class You(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://you.com"
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
+    default_model = "gpt-3.5-turbo"
+    models = [
+        "gpt-3.5-turbo",
+        "gpt-4",
+        "gpt-4-turbo",
+        "claude-instant",
+        "claude-2",
+        "claude-3-opus",
+        "claude-3-sonnet",
+        "gemini-pro",
+        "zephyr"
+    ]
+    model_aliases = {
+        "claude-v2": "claude-2"
+    }
     _cookies = None
     _cookies_used = 0
@@ -35,10 +51,15 @@ class You(AsyncGeneratorProvider):
             connector=get_connector(connector, proxy),
             headers=DEFAULT_HEADERS
         ) as client:
-            if image:
+            if image is not None:
                 chat_mode = "agent"
-            elif model == "gpt-4":
-                chat_mode = model
+            elif not model or model == cls.default_model:
+                chat_mode = "default"
+            elif model.startswith("dall-e"):
+                chat_mode = "create"
+            else:
+                chat_mode = "custom"
+                model = cls.get_model(model)
             cookies = await cls.get_cookies(client) if chat_mode != "default" else None
             upload = json.dumps([await cls.upload_file(client, cookies, to_bytes(image), image_name)]) if image else ""
             #questions = [message["content"] for message in messages if message["role"] == "user"]
@@ -63,6 +84,8 @@ class You(AsyncGeneratorProvider):
                 "userFiles": upload,
                 "selectedChatMode": chat_mode,
             }
+            if chat_mode == "custom":
+                params["selectedAIModel"] = model.replace("-", "_")
             async with (client.post if chat_mode == "default" else client.get)(
                 f"{cls.url}/api/streamingSearch",
                 data=data,
@@ -80,7 +103,11 @@ class You(AsyncGeneratorProvider):
                     if event == "youChatToken" and event in data:
                         yield data[event]
                     elif event == "youChatUpdate" and "t" in data:
-                        yield data["t"]
+                        match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
+                        if match:
+                            yield ImageResponse(match.group(1), messages[-1]["content"])
+                        else:
+                            yield data["t"]
 
     @classmethod
     async def upload_file(cls, client: ClientSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
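For context, a minimal usage sketch of how the reworked provider might be driven after this commit. The prompt text and the choice of "claude-3-opus" are illustrative; the import paths and the positional create_async_generator(model, messages) call follow the repository's usual async-generator provider interface rather than anything shown in this diff, so treat them as assumptions.

# Illustrative sketch only: assumes the g4f package layout at this commit and
# the standard provider call convention create_async_generator(model, messages).
import asyncio

from g4f.Provider.You import You
from g4f.image import ImageResponse

async def main():
    # Hypothetical prompt for demonstration purposes.
    messages = [{"role": "user", "content": "Draw a cat playing chess"}]
    # "claude-3-opus" is neither empty nor the default model, so the new logic
    # selects chat_mode = "custom" and sends selectedAIModel="claude_3_opus".
    async for chunk in You.create_async_generator("claude-3-opus", messages):
        if isinstance(chunk, ImageResponse):
            # youChatUpdate events whose text matches "![fig](...)" are now
            # surfaced as ImageResponse objects instead of raw markdown.
            print("\n[image]", chunk)
        else:
            print(chunk, end="", flush=True)

asyncio.run(main())

Note the dash-to-underscore rewrite in the diff: when chat_mode is "custom", the resolved model name is passed to the streamingSearch endpoint as selectedAIModel with "-" replaced by "_".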