diff options
author | H Lohaus <hlohaus@users.noreply.github.com> | 2024-03-12 02:06:06 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-03-12 02:06:06 +0100 |
commit | 6ef282de3a3245acbfecd08ae48dba85ff91d031 (patch) | |
tree | 0236c9678eea8f9c78ed7c09f3d86eaf3d7c691c /g4f/Provider/ChatForAi.py | |
parent | Update .gitignore (diff) | |
download | gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar.gz gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar.bz2 gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar.lz gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar.xz gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.tar.zst gpt4free-6ef282de3a3245acbfecd08ae48dba85ff91d031.zip |
Diffstat (limited to 'g4f/Provider/ChatForAi.py')
-rw-r--r-- | g4f/Provider/ChatForAi.py | 31 |
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py index afab034b..5aa728a1 100644 --- a/g4f/Provider/ChatForAi.py +++ b/g4f/Provider/ChatForAi.py @@ -2,15 +2,17 @@ from __future__ import annotations import time import hashlib +import uuid from ..typing import AsyncResult, Messages from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider +from ..errors import RateLimitError +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin - -class ChatForAi(AsyncGeneratorProvider): +class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chatforai.store" working = True + default_model = "gpt-3.5-turbo" supports_message_history = True supports_gpt_35_turbo = True @@ -21,36 +23,39 @@ class ChatForAi(AsyncGeneratorProvider): messages: Messages, proxy: str = None, timeout: int = 120, + temperature: float = 0.7, + top_p: float = 1, **kwargs ) -> AsyncResult: + model = cls.get_model(model) headers = { "Content-Type": "text/plain;charset=UTF-8", "Origin": cls.url, "Referer": f"{cls.url}/?r=b", } - async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session: - prompt = messages[-1]["content"] + async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session: timestamp = int(time.time() * 1e3) - conversation_id = f"id_{timestamp-123}" + conversation_id = str(uuid.uuid4()) data = { "conversationId": conversation_id, "conversationType": "chat_continuous", "botId": "chat_continuous", "globalSettings":{ "baseUrl": "https://api.openai.com", - "model": model if model else "gpt-3.5-turbo", + "model": model, "messageHistorySize": 5, - "temperature": 0.7, - "top_p": 1, + "temperature": temperature, + "top_p": top_p, **kwargs }, - "botSettings": {}, - "prompt": prompt, + "prompt": "", "messages": messages, "timestamp": timestamp, - "sign": generate_signature(timestamp, prompt, conversation_id) + 
"sign": generate_signature(timestamp, "", conversation_id) } async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: + if response.status == 429: + raise RateLimitError("Rate limit reached") response.raise_for_status() async for chunk in response.iter_content(): if b"https://chatforai.store" in chunk: @@ -59,5 +64,5 @@ class ChatForAi(AsyncGeneratorProvider): def generate_signature(timestamp: int, message: str, id: str): - buffer = f"{timestamp}:{id}:{message}:7YN8z6d6" + buffer = f"{id}:{timestamp}:{message}:h496Jd6b" return hashlib.sha256(buffer.encode()).hexdigest() |