diff options
author | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-08-30 22:39:18 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-08-30 22:39:18 +0200 |
commit | c702f54e39a39c702cb2a2a8c6782c15422785aa (patch) | |
tree | 8a36ace98ab138e1eff134a5ed8891fd3c817b5b /g4f/Provider/Chatgpt4Online.py | |
parent | . (diff) | |
parent | fix for 500 Internal Server Error #2199 [Request] Blackbox provider now support Gemini and LLaMa 3.1 models #2198 with some stuff from #2196 (diff) | |
download | gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.gz gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.bz2 gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.lz gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.xz gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.zst gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.zip |
Diffstat (limited to 'g4f/Provider/Chatgpt4Online.py')
-rw-r--r-- | g4f/Provider/Chatgpt4Online.py | 7 |
1 file changed, 3 insertions, 4 deletions
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py index f62ef8af..8c058fdc 100644 --- a/g4f/Provider/Chatgpt4Online.py +++ b/g4f/Provider/Chatgpt4Online.py @@ -14,8 +14,8 @@ class Chatgpt4Online(AsyncGeneratorProvider): working = True supports_gpt_4 = True - async def get_nonce(): - async with ClientSession() as session: + async def get_nonce(headers: dict) -> str: + async with ClientSession(headers=headers) as session: async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response: return (await response.json())["restNonce"] @@ -42,9 +42,8 @@ class Chatgpt4Online(AsyncGeneratorProvider): "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36", - "x-wp-nonce": await cls.get_nonce(), } - + headers['x-wp-nonce'] = await cls.get_nonce(headers) async with ClientSession(headers=headers) as session: prompt = format_prompt(messages) data = { |