path: root/g4f/Provider/Nexra.py
blob: b2b83837f2024a9ffc929d58a71f506af25427a3
from __future__ import annotations
import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..image import ImageResponse

class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://nexra.aryahcr.cc"
    chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
    image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_system_message = True
    supports_message_history = True
    
    default_model = 'gpt-3.5-turbo'
    text_models = [
        'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
        'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
        'text-curie-001', 'text-babbage-001', 'text-ada-001',
        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
    ]
    image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi']
    models = [*text_models, *image_models]
    
    model_aliases = {
        # Note: Python dicts silently keep only the last entry for a
        # duplicated key, so each alias can map to a single model. The dated
        # and sized variants are already listed in text_models and resolve
        # directly in get_model().
        "gpt-4": "gpt-4-0613",
        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
        "gpt-3": "text-davinci-003",
        "dalle-2": "dalle2",
    }
    
    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        
        headers = {
            "Content-Type": "application/json",
        }
        
        async with ClientSession(headers=headers) as session:
            if model in cls.image_models:
                # Image generation: the content of the last message is used as the prompt.
                prompt = messages[-1]['content'] if messages else ""
                data = {
                    "prompt": prompt,
                    "model": model,
                    "response": "url"
                }
                async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    result = await response.text()
                    # The API may pad the JSON payload with underscore
                    # characters; strip them before parsing.
                    result_json = json.loads(result.strip('_'))
                    images = result_json.get('images')
                    image_url = images[0] if images else None
                    
                    if image_url:
                        yield ImageResponse(images=image_url, alt=prompt)
            else:
                # Text completion
                data = {
                    "messages": messages,
                    "prompt": format_prompt(messages),
                    "model": model,
                    "markdown": False
                }
                async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    result = await response.text()
                    
                    try:
                        # The completion is returned under the "gpt" key;
                        # strip any underscore padding, as in the image branch.
                        json_response = json.loads(result.strip('_'))
                        yield json_response.get('gpt', '')
                    except json.JSONDecodeError:
                        # Fall back to the raw body if it is not valid JSON.
                        yield result
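
if __name__ == "__main__":
    # Minimal usage sketch (assumption: run as a module with the g4f package
    # on the path, e.g. `python -m g4f.Provider.Nexra`; the relative imports
    # above prevent executing this file directly as a script).
    import asyncio

    async def _demo():
        messages = [{"role": "user", "content": "Say hello."}]
        async for chunk in Nexra.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="")

    asyncio.run(_demo())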