diff options
author | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-10-30 09:57:55 +0100 |
---|---|---|
committer | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-10-30 09:57:55 +0100 |
commit | 1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26 (patch) | |
tree | 435d831df4ad5c18839cfaa647f23e5be035cdd6 /g4f/Provider/nexra/NexraChatGptWeb.py | |
parent | implement direct import of `Client` without using `g4f.client` (diff) | |
parent | Merge pull request #2304 from kqlio67/main (diff) | |
download | gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar.gz gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar.bz2 gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar.lz gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar.xz gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.tar.zst gpt4free-1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26.zip |
Diffstat (limited to 'g4f/Provider/nexra/NexraChatGptWeb.py')
-rw-r--r-- | g4f/Provider/nexra/NexraChatGptWeb.py | 64 |
1 file changed, 0 insertions, 64 deletions
diff --git a/g4f/Provider/nexra/NexraChatGptWeb.py b/g4f/Provider/nexra/NexraChatGptWeb.py deleted file mode 100644 index f82694d4..00000000 --- a/g4f/Provider/nexra/NexraChatGptWeb.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json -import requests - -from ...typing import CreateResult, Messages -from ..base_provider import ProviderModelMixin, AbstractProvider -from ..helper import format_prompt - -class NexraChatGptWeb(AbstractProvider, ProviderModelMixin): - label = "Nexra ChatGPT Web" - url = "https://nexra.aryahcr.cc/documentation/chatgpt/en" - working = True - - default_model = "gptweb" - models = [default_model] - model_aliases = {"gpt-4": "gptweb"} - api_endpoints = {"gptweb": "https://nexra.aryahcr.cc/api/chat/gptweb"} - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - proxy: str = None, - markdown: bool = False, - **kwargs - ) -> CreateResult: - model = cls.get_model(model) - api_endpoint = cls.api_endpoints.get(model, cls.api_endpoints[cls.default_model]) - - headers = { - 'Content-Type': 'application/json' - } - - data = { - "prompt": format_prompt(messages), - "markdown": markdown - } - - response = requests.post(api_endpoint, headers=headers, json=data) - - return cls.process_response(response) - - @classmethod - def process_response(cls, response): - if response.status_code == 200: - try: - content = response.text.lstrip('_') - json_response = json.loads(content) - return json_response.get('gpt', '') - except json.JSONDecodeError: - return "Error: Unable to decode JSON response" - else: - return f"Error: {response.status_code}" |