diff options
-rw-r--r-- | README.md | 46 | ||||
-rw-r--r-- | etc/unittest/integration.py | 1 | ||||
-rw-r--r-- | g4f/Provider/FlowGpt.py | 2 | ||||
-rw-r--r-- | g4f/Provider/HuggingChat.py | 3 | ||||
-rw-r--r-- | g4f/Provider/Llama.py (renamed from g4f/Provider/Llama2.py) | 8 | ||||
-rw-r--r-- | g4f/Provider/PerplexityLabs.py | 7 | ||||
-rw-r--r-- | g4f/Provider/__init__.py | 2 | ||||
-rw-r--r-- | g4f/gui/client/index.html | 1 | ||||
-rw-r--r-- | g4f/gui/client/static/js/chat.v1.js | 2 | ||||
-rw-r--r-- | g4f/locals/provider.py | 7 | ||||
-rw-r--r-- | g4f/models.py | 63 | ||||
-rw-r--r-- | g4f/providers/base_provider.py | 9 |
12 files changed, 97 insertions, 54 deletions
@@ -345,29 +345,31 @@ While we wait for gpt-5, here is a list of new models that are at least better t ### Models -| Model | Base Provider | Provider | Website | -| ----- | ------------- | -------- | ------- | -| gpt-3.5-turbo | OpenAI | 5+ Providers | [openai.com](https://openai.com/) | -| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) | -| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) | -| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) | -| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) | -| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) | -| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | +| Model | Base Provider | Provider | Website | +|-----------------------------| ------------- | -------- | ------- | +| gpt-3.5-turbo | OpenAI | 5+ Providers | [openai.com](https://openai.com/) | +| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) | +| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) | +| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | +| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | +| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) | +| Meta-Llama-3-8b | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) | +| Meta-Llama-3-70b | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) | +| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | +| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | +| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) | +| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) | +| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | +| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | +| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | | airoboros-l2-70b-gpt4-1.4.1 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) | -| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) | -| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) | -| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) | -| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | -| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | -| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) | +| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) | +| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) | +| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) | +| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) | +| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | +| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | +| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) | ## 🔗 Powered by gpt4free diff --git a/etc/unittest/integration.py b/etc/unittest/integration.py index 14d0bf7b..379f36b6 100644 --- a/etc/unittest/integration.py +++ b/etc/unittest/integration.py @@ -32,6 +32,7 @@ class TestProviderIntegration(unittest.TestCase): self.assertIn("success", json.loads(response.choices[0].message.content)) def test_openai(self): + self.skipTest("not working in this network") client = Client(provider=OpenaiChat) response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"}) self.assertIsInstance(response, ChatCompletion) diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py index 7edd6f19..6c2aa046 100644 --- a/g4f/Provider/FlowGpt.py +++ b/g4f/Provider/FlowGpt.py @@ -99,4 +99,4 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): if "event" not in message: continue if message["event"] == "text": - yield message["data"]
\ No newline at end of file + yield message["data"] diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 882edb78..668ce4b1 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -19,7 +19,8 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin): 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'google/gemma-1.1-7b-it', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - 'mistralai/Mistral-7B-Instruct-v0.2' + 'mistralai/Mistral-7B-Instruct-v0.2', + 'meta-llama/Meta-Llama-3-70B-Instruct' ] model_aliases = { "openchat/openchat_3.5": "openchat/openchat-3.5-0106", diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama.py index 04b5aee0..8f3e9ea2 100644 --- a/g4f/Provider/Llama2.py +++ b/g4f/Provider/Llama.py @@ -7,17 +7,21 @@ from ..requests.raise_for_status import raise_for_status from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -class Llama2(AsyncGeneratorProvider, ProviderModelMixin): +class Llama(AsyncGeneratorProvider, ProviderModelMixin): url = "https://www.llama2.ai" working = True supports_message_history = True - default_model = "meta/llama-2-70b-chat" + default_model = "meta/llama-3-70b-chat" models = [ "meta/llama-2-7b-chat", "meta/llama-2-13b-chat", "meta/llama-2-70b-chat", + "meta/meta-llama-3-8b-instruct", + "meta/meta-llama-3-70b-instruct", ] model_aliases = { + "meta-llama/Meta-Llama-3-8b-instruct": "meta/meta-llama-3-8b-instruct", + "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct", "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat", "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat", "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat", diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py index ab36d284..b6fec53c 100644 --- a/g4f/Provider/PerplexityLabs.py +++ b/g4f/Provider/PerplexityLabs.py @@ -15,10 +15,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin): working = True default_model = "mixtral-8x7b-instruct" models = [ - "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct", - "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", - "gemma-2b-it", "gemma-7b-it" - "mistral-medium", "related", "dbrx-instruct" + "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it", "related" ] model_aliases = { "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct", @@ -93,4 +90,4 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin): if data["final"]: break except: - raise RuntimeError(f"Message: {message}")
\ No newline at end of file + raise RuntimeError(f"Message: {message}") diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index d5913e3c..f761df5b 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -40,7 +40,7 @@ from .HuggingChat import HuggingChat from .HuggingFace import HuggingFace from .Koala import Koala from .Liaobots import Liaobots -from .Llama2 import Llama2 +from .Llama import Llama from .Local import Local from .PerplexityLabs import PerplexityLabs from .Pi import Pi diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index a6c4909b..d84bbbe9 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -220,6 +220,7 @@ <option value="gpt-4">gpt-4</option> <option value="gpt-3.5-turbo">gpt-3.5-turbo</option> <option value="llama2-70b">llama2-70b</option> + <option value="llama3-70b-instruct">llama3-70b-instruct</option> <option value="gemini-pro">gemini-pro</option> <option value="">----</option> </select> diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index a17be16e..39027260 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -926,7 +926,7 @@ colorThemes.forEach((themeOption) => { function count_tokens(model, text) { if (model) { if (window.llamaTokenizer) - if (model.startsWith("llama2") || model.startsWith("codellama")) { + if (model.startsWith("llama") || model.startsWith("codellama")) { return llamaTokenizer.encode(text).length; } if (window.mistralTokenizer) diff --git a/g4f/locals/provider.py b/g4f/locals/provider.py index 45041539..d9d73455 100644 --- a/g4f/locals/provider.py +++ b/g4f/locals/provider.py @@ -66,9 +66,12 @@ class LocalProvider: if message["role"] != "system" ) + "\nASSISTANT: " + def should_not_stop(token_id: int, token: str): + return "USER" not in token + with model.chat_session(system_message, prompt_template): if stream: - for token in model.generate(conversation, streaming=True): + for token in model.generate(conversation, streaming=True, callback=should_not_stop): yield token else: - yield model.generate(conversation)
\ No newline at end of file + yield model.generate(conversation, callback=should_not_stop)
\ No newline at end of file diff --git a/g4f/models.py b/g4f/models.py index fe99958c..2b7c69f3 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,28 +4,33 @@ from dataclasses import dataclass from .Provider import RetryProvider, ProviderType from .Provider import ( + Aichatos, + Bing, + Blackbox, Chatgpt4Online, - PerplexityLabs, - GeminiProChat, - ChatgptNext, - HuggingChat, - HuggingFace, - OpenaiChat, ChatgptAi, + ChatgptNext, + Cohere, + Cnote, DeepInfra, - GigaChat, - Liaobots, + Feedough, FreeGpt, - Llama2, - Vercel, Gemini, + GeminiProChat, + GigaChat, + HuggingChat, + HuggingFace, Koala, - Cohere, - Bing, - You, + Liaobots, + Llama, + OpenaiChat, + PerplexityLabs, Pi, + Vercel, + You, ) + @dataclass(unsafe_hash=True) class Model: """ @@ -79,6 +84,9 @@ gpt_35_turbo = Model( ChatgptNext, Koala, OpenaiChat, + Aichatos, + Cnote, + Feedough, ]) ) @@ -117,19 +125,31 @@ gigachat_pro = Model( llama2_7b = Model( name = "meta-llama/Llama-2-7b-chat-hf", base_provider = 'meta', - best_provider = RetryProvider([Llama2, DeepInfra]) + best_provider = RetryProvider([Llama, DeepInfra]) ) llama2_13b = Model( name = "meta-llama/Llama-2-13b-chat-hf", base_provider = 'meta', - best_provider = RetryProvider([Llama2, DeepInfra]) + best_provider = RetryProvider([Llama, DeepInfra]) ) llama2_70b = Model( name = "meta-llama/Llama-2-70b-chat-hf", base_provider = "meta", - best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]) + best_provider = RetryProvider([Llama, DeepInfra, HuggingChat]) +) + +llama3_8b_instruct = Model( + name = "meta-llama/Meta-Llama-3-8b-instruct", + base_provider = "meta", + best_provider = RetryProvider([Llama]) +) + +llama3_70b_instruct = Model( + name = "meta-llama/Meta-Llama-3-70b-instruct", + base_provider = "meta", + best_provider = RetryProvider([Llama, HuggingChat]) ) codellama_34b_instruct = Model( @@ -279,6 +299,12 @@ command_r_plus = Model( best_provider = RetryProvider([HuggingChat, Cohere]) ) +blackbox = Model( + name = 'blackbox', + base_provider = 'blackbox', + best_provider = Blackbox +) + class ModelUtils: """ Utility class for mapping string identifiers to Model instances. @@ -302,10 +328,12 @@ class ModelUtils: 'gpt-4-32k-0613' : gpt_4_32k_0613, 'gpt-4-turbo' : gpt_4_turbo, - # Llama 2 + # Llama 'llama2-7b' : llama2_7b, 'llama2-13b': llama2_13b, 'llama2-70b': llama2_70b, + 'llama3-8b-instruct' : llama3_8b_instruct, + 'llama3-70b-instruct': llama3_70b_instruct, 'codellama-34b-instruct': codellama_34b_instruct, 'codellama-70b-instruct': codellama_70b_instruct, @@ -331,6 +359,7 @@ class ModelUtils: 'claude-3-sonnet': claude_3_sonnet, # other + 'blackbox': blackbox, 'command-r+': command_r_plus, 'dbrx-instruct': dbrx_instruct, 'lzlv-70b': lzlv_70b, diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py index fa3f15a9..685a6f98 100644 --- a/g4f/providers/base_provider.py +++ b/g4f/providers/base_provider.py @@ -19,8 +19,13 @@ else: # Set Windows event loop policy for better compatibility with asyncio and curl_cffi if sys.platform == 'win32': - if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy): - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + try: + from curl_cffi import aio + if not hasattr(aio, "_get_selector"): + if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy): + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + except ImportError: + pass def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]: try: |