From 890e7891c1e2cc7460626b115b1e3dad8bede316 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Wed, 11 Sep 2024 16:17:59 +0300
Subject: feat(g4f/Provider/PerplexityLabs.py): add model aliases and update
 default model

---
 g4f/Provider/PerplexityLabs.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'g4f/Provider/PerplexityLabs.py')

diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 3656a39b..ecb51f9b 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "llama-3.1-8b-instruct"
+    default_model = "llama-3.1-70b-instruct"
     models = [
         "llama-3.1-sonar-large-128k-online",
         "llama-3.1-sonar-small-128k-online",
@@ -22,6 +22,15 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
     ]
+
+    model_aliases = {
+        "llama-3.1-8b": "llama-3.1-sonar-large-128k-online",
+        "llama-3.1-8b": "sonar-small-128k-online",
+        "llama-3.1-8b": "llama-3.1-sonar-large-128k-chat",
+        "llama-3.1-8b": "llama-3.1-sonar-small-128k-chat",
+        "llama-3.1-8b": "llama-3.1-8b-instruct",
+        "llama-3.1-70b": "llama-3.1-70b-instruct",
+    }
 
     @classmethod
     async def create_async_generator(
--
cgit v1.2.3
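
Review note on the added model_aliases: as committed, the key "llama-3.1-8b" appears five times, so the Python dict keeps only the last binding ("llama-3.1-8b": "llama-3.1-8b-instruct") and the sonar aliases are silently discarded; the value "sonar-small-128k-online" also drops the "llama-3.1-" prefix used for the same model in the models list. A minimal sketch of a deduplicated mapping follows; the short alias keys (sonar-large-online, sonar-small-online, sonar-large-chat, sonar-small-chat) are illustrative names chosen here, not identifiers from this commit.

    # Sketch only: one distinct alias key per model, so no entry is
    # overwritten by a duplicate key (alias names are illustrative).
    model_aliases = {
        "sonar-large-online": "llama-3.1-sonar-large-128k-online",
        "sonar-small-online": "llama-3.1-sonar-small-128k-online",
        "sonar-large-chat": "llama-3.1-sonar-large-128k-chat",
        "sonar-small-chat": "llama-3.1-sonar-small-128k-chat",
        "llama-3.1-8b": "llama-3.1-8b-instruct",
        "llama-3.1-70b": "llama-3.1-70b-instruct",
    }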