Diffstat (limited to 'g4f/Provider/nexra/NexraChatGPT.py')
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py  97
1 file changed, 60 insertions(+), 37 deletions(-)
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
index 8ed83f98..f9f49139 100644
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -1,22 +1,60 @@
from __future__ import annotations
+
from aiohttp import ClientSession
+import json
+
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
-import json
+
class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra ChatGPT"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = False
+
+ default_model = 'gpt-3.5-turbo'
+    models = [
+        'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314',
+        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613',
+        'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
+        'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
+        'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001',
+        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
+    ]
+
+    model_aliases = {
+        # Resolve dated and legacy model names to their family model
+        "gpt-4-0613": "gpt-4",
+        "gpt-4-32k": "gpt-4",
+        "gpt-4-0314": "gpt-4",
+        "gpt-4-32k-0314": "gpt-4",
+
+        "gpt-3.5-turbo-16k": "gpt-3.5-turbo",
+        "gpt-3.5-turbo-0613": "gpt-3.5-turbo",
+        "gpt-3.5-turbo-16k-0613": "gpt-3.5-turbo",
+        "gpt-3.5-turbo-0301": "gpt-3.5-turbo",
+
+        "text-davinci-003": "gpt-3",
+        "text-davinci-002": "gpt-3",
+        "code-davinci-002": "gpt-3",
+        "text-curie-001": "gpt-3",
+        "text-babbage-001": "gpt-3",
+        "text-ada-001": "gpt-3",
+        "davinci": "gpt-3",
+        "curie": "gpt-3",
+        "babbage": "gpt-3",
+        "ada": "gpt-3",
+        "babbage-002": "gpt-3",
+        "davinci-002": "gpt-3",
+    }
- models = [
- 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
- 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613',
- 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
- 'text-curie-001', 'text-babbage-001', 'text-ada-001',
- 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
- ]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+            return cls.default_model
+
@classmethod
async def create_async_generator(
@@ -26,41 +64,26 @@ class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- "Accept": "application/json",
- "Content-Type": "application/json",
- "Referer": f"{cls.url}/chat",
+ "Content-Type": "application/json"
}
-
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
+ "messages": messages,
"prompt": prompt,
"model": model,
- "markdown": False,
- "messages": messages or [],
+ "markdown": False
}
-
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
-
- content_type = response.headers.get('Content-Type', '')
- if 'application/json' in content_type:
- result = await response.json()
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- elif 'text/plain' in content_type:
- text = await response.text()
- try:
- result = json.loads(text)
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- except json.JSONDecodeError:
- yield text # If not JSON, return text
- else:
- raise Exception(f"Unexpected response type: {content_type}. Response text: {await response.text()}")
-
+                response_text = await response.text()
+                try:
+                    # Strip the leading underscore some responses prepend to the JSON payload
+                    if response_text.startswith('_'):
+                        response_text = response_text[1:]
+                    response_data = json.loads(response_text)
+                    yield response_data.get('gpt', '')
+                except json.JSONDecodeError:
+                    # Payload is not valid JSON: yield an empty chunk rather than raising
+                    yield ''
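
As a quick, informal check of the refactored provider, the new get_model() resolution and the simplified response handling can be exercised by driving the async generator directly. The snippet below is a minimal sketch rather than part of the commit: the import path simply mirrors the file path shown in the diff, and the message format follows g4f's usual role/content dictionaries.

    import asyncio

    from g4f.Provider.nexra.NexraChatGPT import NexraChatGPT

    async def main():
        # Plain role/content messages, as used elsewhere in g4f
        messages = [{"role": "user", "content": "Say hello in one sentence."}]
        # get_model() falls back to default_model for unknown names and
        # resolves aliases such as "gpt-4-32k" to a listed model
        async for chunk in NexraChatGPT.create_async_generator(
            model="gpt-3.5-turbo",
            messages=messages,
        ):
            print(chunk, end="")

    asyncio.run(main())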