| author | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2023-11-16 16:56:23 +0100 |
| --- | --- | --- |
| committer | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2023-11-16 16:56:23 +0100 |
| commit | 0c4e5e5127f91c6789da8dbfbcbfee63d7a578a3 (patch) | |
| tree | ba69bbcf859f6608c262f27d666af174f845cf8b /g4f/Provider | |
| parent | Update README.md (diff) | |
Diffstat (limited to 'g4f/Provider')
-rw-r--r-- | g4f/Provider/ChatAnywhere.py | 53
-rw-r--r-- | g4f/Provider/MyShell.py | 147
-rw-r--r-- | g4f/Provider/PerplexityAi.py | 121
-rw-r--r-- | g4f/Provider/Phind.py | 165
-rw-r--r-- | g4f/Provider/__init__.py | 168
-rw-r--r-- | g4f/Provider/helper.py | 57
-rw-r--r-- | g4f/Provider/unfinished/PerplexityAi.py | 100
-rw-r--r-- | g4f/Provider/unfinished/__init__.py | 1
8 files changed, 419 insertions, 393 deletions
diff --git a/g4f/Provider/ChatAnywhere.py b/g4f/Provider/ChatAnywhere.py
new file mode 100644
index 00000000..704544e9
--- /dev/null
+++ b/g4f/Provider/ChatAnywhere.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class ChatAnywhere(AsyncGeneratorProvider):
+    url = "https://chatanywhere.cn"
+    supports_gpt_35_turbo = True
+    supports_message_history = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        temperature: float = 0.5,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Content-Type": "application/json",
+            "Referer": f"{cls.url}/",
+            "Origin": cls.url,
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Authorization": "",
+            "Connection": "keep-alive",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "list": messages,
+                "id": "s1_qYuOLXjI3rEpc7WHfQ",
+                "title": messages[-1]["content"],
+                "prompt": "",
+                "temperature": temperature,
+                "models": "61490748",
+                "continuous": True
+            }
+            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        yield chunk.decode()
\ No newline at end of file
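The new ChatAnywhere provider follows the project's async-generator interface, so it can be consumed directly with `asyncio`. A minimal sketch, assuming the package is importable as `g4f` and the interface behaves as shown in the diff above:

```python
import asyncio

from g4f.Provider import ChatAnywhere

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator yields decoded response chunks as they arrive.
    async for chunk in ChatAnywhere.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```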
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index 02b27354..09f6feb9 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -1,91 +1,94 @@
 from __future__ import annotations
-import time, random, json
+import time, json
 
-from ..requests import StreamSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+try:
+    from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+    class WebDriver():
+        pass
 
-class MyShell(AsyncGeneratorProvider):
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import format_prompt, get_browser
+
+class MyShell(BaseProvider):
     url = "https://app.myshell.ai/chat"
     working = True
     supports_gpt_35_turbo = True
+    supports_stream = True
 
     @classmethod
-    async def create_async_generator(
+    def create_completion(
         cls,
         model: str,
         messages: Messages,
+        stream: bool,
         proxy: str = None,
         timeout: int = 120,
+        browser: WebDriver = None,
+        display: bool = True,
         **kwargs
-    ) -> AsyncResult:
-        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
-        headers = {
-            "User-Agent": user_agent,
-            "Myshell-Service-Name": "organics-api",
-            "Visitor-Id": generate_visitor_id(user_agent)
-        }
-        async with StreamSession(
-            impersonate="chrome107",
-            proxies={"https": proxy},
-            timeout=timeout,
-            headers=headers
-        ) as session:
-            prompt = format_prompt(messages)
+    ) -> CreateResult:
+        if not browser:
+            if display:
+                driver, display = get_browser("", True, proxy)
+            else:
+                display = get_browser("", False, proxy)
+        else:
+            driver = browser
+
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+
+        driver.get(cls.url)
+        try:
+            WebDriverWait(driver, timeout).until(
+                EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
+            )
+            script = """
+response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
+    "headers": {
+        "accept": "application/json",
+        "content-type": "application/json",
+        "myshell-service-name": "organics-api",
+        "visitor-id": localStorage.getItem("mix_visitorId")
+    },
+    "body": '{body}',
+    "method": "POST"
+})
+window.reader = response.body.getReader();
+"""
             data = {
-                "botId": "1",
+                "botId": "4738",
                 "conversation_scenario": 3,
-                "message": prompt,
+                "message": format_prompt(messages),
                 "messageType": 1
             }
-            async with session.post("https://api.myshell.ai/v1/bot/chat/send_message", json=data) as response:
-                response.raise_for_status()
-                event = None
-                async for line in response.iter_lines():
-                    if line.startswith(b"event: "):
-                        event = line[7:]
-                    elif event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT":
-                        if line.startswith(b"data: "):
-                            yield json.loads(line[6:])["content"]
-                    if event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED":
-                        break
-
-
-def xor_hash(B: str):
-    r = []
-    i = 0
-
-    def o(e, t):
-        o_val = 0
-        for i in range(len(t)):
-            o_val |= r[i] << (8 * i)
-        return e ^ o_val
-
-    for e in range(len(B)):
-        t = ord(B[e])
-        r.insert(0, 255 & t)
-
-        if len(r) >= 4:
-            i = o(i, r)
-            r = []
-
-    if len(r) > 0:
-        i = o(i, r)
-
-    return hex(i)[2:]
-
-def performance() -> str:
-    t = int(time.time() * 1000)
-    e = 0
-    while t == int(time.time() * 1000):
-        e += 1
-    return hex(t)[2:] + hex(e)[2:]
-
-def generate_visitor_id(user_agent: str) -> str:
-    f = performance()
-    r = hex(int(random.random() * (16**16)))[2:-2]
-    d = xor_hash(user_agent)
-    e = hex(1080 * 1920)[2:]
-    return f"{f}-{r}-{d}-{e}-{f}"
\ No newline at end of file
+            driver.execute_script(script.replace("{body}", json.dumps(data)))
+            script = """
+chunk = await window.reader.read();
+text = await (new Response(chunk['value']).text());
+content = '';
+text.split('\\n').forEach((line, index) => {
+    if (line.startsWith('data: ')) {
+        try {
+            const data = JSON.parse(line.substring('data: '.length));
+            if ('content' in data) {
+                content += data['content'];
+            }
+        } catch(e) {}
+    }
+});
+return content;
+"""
+            while chunk := driver.execute_script(script):
+                yield chunk
+        finally:
+            driver.close()
+            if not browser:
+                time.sleep(0.1)
+                driver.quit()
+            if display:
+                display.stop()
\ No newline at end of file
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
new file mode 100644
index 00000000..8bf83b6a
--- /dev/null
+++ b/g4f/Provider/PerplexityAi.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+import time
+try:
+    from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+    class WebDriver():
+        pass
+
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import format_prompt, get_browser
+
+class PerplexityAi(BaseProvider):
+    url = "https://www.perplexity.ai"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        timeout: int = 120,
+        browser: WebDriver = None,
+        copilot: bool = False,
+        display: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+
+        if browser:
+            driver = browser
+        else:
+            if display:
+                driver, display = get_browser("", True, proxy)
+            else:
+                driver = get_browser("", False, proxy)
+
+        prompt = format_prompt(messages)
+
+        driver.get(f"{cls.url}/")
+        wait = WebDriverWait(driver, timeout)
+
+        script = """
+window._message = window._last_message = "";
+window._message_finished = false;
+const _socket_send = WebSocket.prototype.send;
+WebSocket.prototype.send = function(...args) {
+    if (!window.socket_onmessage) {
+        window._socket_onmessage = this;
+        this.addEventListener("message", (event) => {
+            if (event.data.startsWith("42")) {
+                let data = JSON.parse(event.data.substring(2));
+                if (data[0] =="query_progress" || data[0] == "query_answered") {
+                    let content = JSON.parse(data[1]["text"]);
+                    if (data[1]["mode"] == "copilot") {
+                        content = content[content.length-1]["content"]["answer"];
+                        content = JSON.parse(content);
+                    }
+                    window._message = content["answer"];
+                    window._message_finished = data[0] == "query_answered";
+                    window._web_results = content["web_results"];
+                }
+            }
+        });
+    }
+    return _socket_send.call(this, ...args);
+};
+"""
+        driver.execute_script(script)
+
+        # Page loaded?
+        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))
+
+        if copilot:
+            try:
+                driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
+                driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
+            except:
+                pass
+
+        # Enter question
+        driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt)
+        # Submit question
+        driver.find_element(By.CSS_SELECTOR, "button.bg-super svg[data-icon='arrow-right']").click()
+
+        try:
+            script = """
+if(window._message && window._message != window._last_message) {
+    try {
+        return window._message.substring(window._last_message.length);
+    } finally {
+        window._last_message = window._message;
+    }
+} else if(window._message_finished) {
+    return null;
+} else {
+    return '';
+}
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            driver.close()
+            if not browser:
+                time.sleep(0.1)
+                driver.quit()
+            if display:
+                display.stop()
\ No newline at end of file
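Unlike ChatAnywhere, the new PerplexityAi provider drives a real Chrome instance through Selenium and streams the answer by hooking the page's WebSocket traffic. A hedged usage sketch, assuming `undetected_chromedriver` (and `pyvirtualdisplay` for the virtual display) are installed and the interface is as shown in the diff above:

```python
from g4f.Provider import PerplexityAi

messages = [{"role": "user", "content": "What is the capital of France?"}]

# create_completion is a blocking generator: it opens the browser, submits the
# prompt and yields new text whenever the hooked WebSocket delivers more answer.
for chunk in PerplexityAi.create_completion(
    model="gpt-4",
    messages=messages,
    stream=True,
    copilot=False,  # the Copilot toggle is specific to this provider
):
    print(chunk, end="", flush=True)
```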
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 8f3e7942..56100d87 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -1,83 +1,116 @@
 from __future__ import annotations
-import random, string
-from datetime import datetime
+import time
+from urllib.parse import quote
+try:
+    from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+    class WebDriver():
+        pass
 
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import format_prompt, get_browser
 
-
-class Phind(AsyncGeneratorProvider):
+class Phind(BaseProvider):
     url = "https://www.phind.com"
     working = True
     supports_gpt_4 = True
+    supports_stream = True
 
     @classmethod
-    async def create_async_generator(
+    def create_completion(
         cls,
         model: str,
         messages: Messages,
+        stream: bool,
         proxy: str = None,
         timeout: int = 120,
+        browser: WebDriver = None,
+        creative_mode: bool = None,
+        display: bool = True,
         **kwargs
-    ) -> AsyncResult:
-        chars = string.ascii_lowercase + string.digits
-        user_id = ''.join(random.choice(chars) for _ in range(24))
-        data = {
-            "question": format_prompt(messages),
-            "webResults": [],
-            "options": {
-                "date": datetime.now().strftime("%d.%m.%Y"),
-                "language": "en",
-                "detailed": True,
-                "anonUserId": user_id,
-                "answerModel": "GPT-4",
-                "creativeMode": False,
-                "customLinks": []
-            },
-            "context":""
-        }
-        headers = {
-            "Authority": cls.url,
-            "Accept": "application/json, text/plain, */*",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/"
-        }
-        async with StreamSession(
-            headers=headers,
-            timeout=(5, timeout),
-            proxies={"https": proxy},
-            impersonate="chrome107"
-        ) as session:
-            async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
-                response.raise_for_status()
-                new_lines = 0
-                async for line in response.iter_lines():
-                    if not line:
-                        continue
-                    if line.startswith(b"data: "):
-                        line = line[6:]
-                        if line.startswith(b"<PHIND_METADATA>"):
-                            continue
-                        if line:
-                            if new_lines:
-                                yield "".join(["\n" for _ in range(int(new_lines / 2))])
-                                new_lines = 0
-                            yield line.decode()
-                        else:
-                            new_lines += 1
+    ) -> CreateResult:
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+        if browser:
+            driver = browser
+        else:
+            if display:
+                driver, display = get_browser("", True, proxy)
+            else:
+                driver = get_browser("", False, proxy)
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("timeout", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        prompt = quote(format_prompt(messages))
+        driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
+
+        if model.startswith("gpt-4") or creative_mode:
+            wait = WebDriverWait(driver, timeout)
+            # Open dropdown
+            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
+            driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
+            # Enable GPT-4
+            wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
+            if model.startswith("gpt-4"):
+                driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
+            # Enable creative mode
+            if creative_mode or creative_mode == None:
+                driver.find_element(By.ID, "Creative Mode").click()
+            # Submit question
+            driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
+            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
+
+        try:
+            script = """
+window._fetch = window.fetch;
+window.fetch = (url, options) => {
+    const result = window._fetch(url, options);
+    if (url != "/api/infer/answer") return result;
+    result.then((response) => {
+        if (!response.body.locked) {
+            window.reader = response.body.getReader();
+        }
+    });
+    return new Promise((resolve, reject) => {
+        resolve(new Response(new ReadableStream()))
+    });
+}
+"""
+            driver.execute_script(script)
+            script = """
+if(window.reader) {
+    chunk = await window.reader.read();
+    if (chunk['done']) return null;
+    text = await (new Response(chunk['value']).text());
+    content = '';
+    text.split('\\r\\n').forEach((line, index) => {
+        if (line.startsWith('data: ')) {
+            line = line.substring('data: '.length);
+            if (!line.startsWith('<PHIND_METADATA>')) {
+                if (line) content += line;
+                else content += '\\n';
+            }
+        }
+    });
+    return content.replace('\\n\\n', '\\n');
+} else {
+    return ''
+}
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            driver.close()
+            if not browser:
+                time.sleep(0.1)
+                driver.quit()
+            if display:
+                display.stop()
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 70ad9de7..f1f0a167 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,10 +1,12 @@
-from __future__ import annotations
+from __future__ import annotations
+
 from .AiAsk import AiAsk
 from .Aichat import Aichat
 from .AItianhu import AItianhu
 from .AItianhuSpace import AItianhuSpace
 from .Berlin import Berlin
 from .Bing import Bing
+from .ChatAnywhere import ChatAnywhere
 from .ChatBase import ChatBase
 from .ChatForAi import ChatForAi
 from .Chatgpt4Online import Chatgpt4Online
@@ -28,6 +30,7 @@ from .Llama2 import Llama2
 from .MyShell import MyShell
 from .NoowAi import NoowAi
 from .Opchatgpts import Opchatgpts
+from .PerplexityAi import PerplexityAi
 from .Phind import Phind
 from .Vercel import Vercel
 from .Ylokh import Ylokh
@@ -41,150 +44,23 @@ from .deprecated import *
 from .needs_auth import *
 from .unfinished import *
 
-class ProviderUtils:
-    convert: dict[str, BaseProvider] = {
-        'AItianhu': AItianhu,
-        'AItianhuSpace': AItianhuSpace,
-        'Acytoo': Acytoo,
-        'AiAsk': AiAsk,
-        'AiService': AiService,
-        'Aibn': Aibn,
-        'Aichat': Aichat,
-        'Ails': Ails,
-        'Aivvm': Aivvm,
-        'AsyncGeneratorProvider': AsyncGeneratorProvider,
-        'AsyncProvider': AsyncProvider,
-        'Bard': Bard,
-        'BaseProvider': BaseProvider,
-        'Berlin': Berlin,
-        'Bing': Bing,
-        'ChatBase': ChatBase,
-        'ChatForAi': ChatForAi,
-        'Chatgpt4Online': Chatgpt4Online,
-        'ChatgptAi': ChatgptAi,
-        'ChatgptDemo': ChatgptDemo,
-        'ChatgptDuo': ChatgptDuo,
-        'ChatgptFree': ChatgptFree,
-        'ChatgptLogin': ChatgptLogin,
-        'ChatgptX': ChatgptX,
-        'CodeLinkAva': CodeLinkAva,
-        'Cromicle': Cromicle,
-        'DeepInfra': DeepInfra,
-        'DfeHub': DfeHub,
-        'EasyChat': EasyChat,
-        'Equing': Equing,
-        'FastGpt': FastGpt,
-        'Forefront': Forefront,
-        'FakeGpt': FakeGpt,
-        'FreeGpt': FreeGpt,
-        'GPTalk': GPTalk,
-        'GptChatly': GptChatly,
-        'GetGpt': GetGpt,
-        'GptForLove': GptForLove,
-        'GptGo': GptGo,
-        'GptGod': GptGod,
-        'Hashnode': Hashnode,
-        'H2o': H2o,
-        'HuggingChat': HuggingChat,
-        'Komo': Komo,
-        'Koala': Koala,
-        'Liaobots': Liaobots,
-        'Llama2': Llama2,
-        'Lockchat': Lockchat,
-        'MikuChat': MikuChat,
-        'Myshell': Myshell,
-        'MyShell': MyShell,
-        'NoowAi': NoowAi,
-        'Opchatgpts': Opchatgpts,
-        'OpenAssistant': OpenAssistant,
-        'OpenaiChat': OpenaiChat,
-        'PerplexityAi': PerplexityAi,
-        'Phind': Phind,
-        'Raycast': Raycast,
-        'Theb': Theb,
-        'V50': V50,
-        'Vercel': Vercel,
-        'Vitalentum': Vitalentum,
-        'Wewordle': Wewordle,
-        'Wuguokai': Wuguokai,
-        'Ylokh': Ylokh,
-        'You': You,
-        'Yqcloud': Yqcloud,
-        'GeekGpt': GeekGpt,
-
-        'BaseProvider': BaseProvider,
-        'AsyncProvider': AsyncProvider,
-        'AsyncGeneratorProvider': AsyncGeneratorProvider,
-        'RetryProvider': RetryProvider,
-    }
+import sys
 
-__all__ = [
-    'BaseProvider',
-    'AsyncProvider',
-    'AsyncGeneratorProvider',
-    'RetryProvider',
-    'Acytoo',
-    'AiAsk',
-    'Aibn',
-    'Aichat',
-    'Ails',
-    'Aivvm',
-    'AiService',
-    'AItianhu',
-    'AItianhuSpace',
-    'Aivvm',
-    'Bard',
-    'Berlin',
-    'Bing',
-    'ChatBase',
-    'ChatForAi',
-    'Chatgpt4Online',
-    'ChatgptAi',
-    'ChatgptDemo',
-    'ChatgptDuo',
-    'ChatgptFree',
-    'ChatgptLogin',
-    'ChatgptX',
-    'Cromicle',
-    'DeepInfra',
-    'CodeLinkAva',
-    'DfeHub',
-    'EasyChat',
-    'Forefront',
-    'FakeGpt',
-    'FreeGpt',
-    'GPTalk',
-    'GptChatly',
-    'GptForLove',
-    'GetGpt',
-    'GptGo',
-    'GptGod',
-    'Hashnode',
-    'H2o',
-    'HuggingChat',
-    'Koala',
-    'Liaobots',
-    'Llama2',
-    'Lockchat',
-    'Myshell',
-    'MyShell',
-    'NoowAi',
-    'Opchatgpts',
-    'Raycast',
-    'OpenaiChat',
-    'OpenAssistant',
-    'PerplexityAi',
-    'Phind',
-    'Theb',
-    'Vercel',
-    'Vitalentum',
-    'Wewordle',
-    'Ylokh',
-    'You',
-    'Yqcloud',
-    'Equing',
-    'FastGpt',
-    'Wuguokai',
-    'V50',
-    'GeekGpt'
+__modules__: list = [
+    getattr(sys.modules[__name__], provider) for provider in dir()
+    if not provider.startswith("__")
+]
+__providers__: list[type[BaseProvider]] = [
+    provider for provider in __modules__
+    if isinstance(provider, type)
+    and issubclass(provider, BaseProvider)
 ]
+__all__: list[str] = [
+    provider.__name__ for provider in __providers__
+]
+__map__: dict[str, BaseProvider] = dict([
+    (provider.__name__, provider) for provider in __providers__
+])
+
+class ProviderUtils:
+    convert: dict[str, BaseProvider] = __map__
\ No newline at end of file
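The hand-maintained `ProviderUtils.convert` dictionary and `__all__` list are replaced by introspection: every class imported into `g4f/Provider/__init__.py` that subclasses `BaseProvider` is collected into `__providers__`, `__all__` and `__map__` automatically. A short sketch of what that gives callers, assuming the package layout above:

```python
from g4f.Provider import ProviderUtils, __all__, __providers__

# Look up a provider class by name, as the old convert dict allowed.
phind = ProviderUtils.convert["Phind"]
print(phind.url, phind.supports_gpt_4)

# New providers such as ChatAnywhere or PerplexityAi are exported as soon as
# their import line is added; no list or dict needs to be edited by hand.
print(len(__all__), "provider names exported")
print(all(p.__name__ in __all__ for p in __providers__))
```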
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 519a024a..cd9a971c 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -3,13 +3,39 @@ from __future__ import annotations
 import sys
 import asyncio
 import webbrowser
-
 from os import path
 from asyncio import AbstractEventLoop
 from platformdirs import user_config_dir
+from browser_cookie3 import (
+    chrome,
+    chromium,
+    opera,
+    opera_gx,
+    brave,
+    edge,
+    vivaldi,
+    firefox,
+    BrowserCookieError
+)
+try:
+    from undetected_chromedriver import Chrome, ChromeOptions
+except ImportError:
+    class Chrome():
+        def __init__():
+            raise RuntimeError('Please install "undetected_chromedriver" and "pyvirtualdisplay" package')
+    class ChromeOptions():
+        def add_argument():
+            pass
+try:
+    from pyvirtualdisplay import Display
+except ImportError:
+    class Display():
+        def start():
+            pass
+        def stop():
+            pass
 
-from ..typing import Dict, Messages
-from browser_cookie3 import chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox, BrowserCookieError
+from ..typing import Dict, Messages, Union, Tuple
 from .. import debug
 
 # Change event loop policy on windows
@@ -106,10 +132,25 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
     return f"{formatted}\nAssistant:"
 
 
-def get_browser(user_data_dir: str = None):
-    from undetected_chromedriver import Chrome
-
-    if not user_data_dir:
+def get_browser(
+    user_data_dir: str = None,
+    display: bool = False,
+    proxy: str = None
+) -> Union[Chrome, Tuple[Chrome, Display]] :
+    if user_data_dir == None:
         user_data_dir = user_config_dir("g4f")
-
-    return Chrome(user_data_dir=user_data_dir)
\ No newline at end of file
+
+    if display:
+        display = Display(visible=0, size=(1920, 1080))
+        display.start()
+
+    options = None
+    if proxy:
+        options = ChromeOptions()
+        options.add_argument(f'--proxy-server={proxy}')
+
+    browser = Chrome(user_data_dir=user_data_dir, options=options)
+    if display:
+        return browser, display
+
+    return browser
\ No newline at end of file
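`get_browser()` now optionally starts a virtual display and configures a proxy, returning either the driver alone or a `(driver, display)` tuple, which is how the Selenium-based providers above call it. A minimal sketch under those assumptions (Chrome plus `undetected_chromedriver` installed, `pyvirtualdisplay` only needed when `display=True`; the proxy URL is a placeholder):

```python
from g4f.Provider.helper import get_browser

# With display=True the helper also starts a pyvirtualdisplay Display,
# so both objects must be cleaned up afterwards.
driver, display = get_browser("", display=True, proxy="http://127.0.0.1:8080")
try:
    driver.get("https://www.phind.com")
finally:
    driver.quit()
    display.stop()

# Without a display, only the Chrome driver is returned.
driver = get_browser("")
driver.quit()
```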
diff --git a/g4f/Provider/unfinished/PerplexityAi.py b/g4f/Provider/unfinished/PerplexityAi.py
deleted file mode 100644
index e97dbf0d..00000000
--- a/g4f/Provider/unfinished/PerplexityAi.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from __future__ import annotations
-
-import json
-import time
-import base64
-from curl_cffi.requests import AsyncSession
-
-from ..base_provider import AsyncProvider, format_prompt, get_cookies
-
-
-class PerplexityAi(AsyncProvider):
-    url = "https://www.perplexity.ai"
-    supports_gpt_35_turbo = True
-    _sources = []
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        proxy: str = None,
-        **kwargs
-    ) -> str:
-        url = f"{cls.url}/socket.io/?EIO=4&transport=polling"
-        headers = {
-            "Referer": f"{cls.url}/"
-        }
-        async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
-            url_session = "https://www.perplexity.ai/api/auth/session"
-            response = await session.get(url_session)
-            response.raise_for_status()
-
-            url_session = "https://www.perplexity.ai/api/auth/session"
-            response = await session.get(url_session)
-            response.raise_for_status()
-
-            response = await session.get(url, params={"t": timestamp()})
-            response.raise_for_status()
-            sid = json.loads(response.text[1:])["sid"]
-
-            response = await session.get(url, params={"t": timestamp(), "sid": sid})
-            response.raise_for_status()
-
-            data = '40{"jwt":"anonymous-ask-user"}'
-            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
-            response.raise_for_status()
-
-            response = await session.get(url, params={"t": timestamp(), "sid": sid})
-            response.raise_for_status()
-
-            data = "424" + json.dumps([
-                "perplexity_ask",
-                format_prompt(messages),
-                {
-                    "version":"2.1",
-                    "source":"default",
-                    "language":"en",
-                    "timezone": time.tzname[0],
-                    "search_focus":"internet",
-                    "mode":"concise"
-                }
-            ])
-            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
-            response.raise_for_status()
-
-            while True:
-                response = await session.get(url, params={"t": timestamp(), "sid": sid})
-                response.raise_for_status()
-                for line in response.text.splitlines():
-                    if line.startswith("434"):
-                        result = json.loads(json.loads(line[3:])[0]["text"])
-
-                        cls._sources = [{
-                            "title": source["name"],
-                            "url": source["url"],
-                            "snippet": source["snippet"]
-                        } for source in result["web_results"]]
-
-                        return result["answer"]
-
-    @classmethod
-    def get_sources(cls):
-        return cls._sources
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
-def timestamp() -> str:
-    return base64.urlsafe_b64encode(int(time.time()-1407782612).to_bytes(4, 'big')).decode()
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
index bf5ff9aa..712e6212 100644
--- a/g4f/Provider/unfinished/__init__.py
+++ b/g4f/Provider/unfinished/__init__.py
@@ -1,5 +1,4 @@
 from .MikuChat import MikuChat
-from .PerplexityAi import PerplexityAi
 from .Komo import Komo
 from .TalkAi import TalkAi
 from .ChatAiGpt import ChatAiGpt
\ No newline at end of file