summary refs log tree commit diff stats
path: root/g4f
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--  g4f/Provider/Blackbox.py     3
-rw-r--r--  g4f/Provider/HuggingChat.py  2
-rw-r--r--  g4f/models.py                2
3 files changed, 4 insertions, 3 deletions
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 40696c82..3e183076 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -154,7 +154,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async for chunk in response.content.iter_any():
if chunk:
decoded_chunk = chunk.decode()
- # Видаляємо префікс $@$v=v1.10-rv1$@$ та інші подібні
decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
- if decoded_chunk.strip(): # Перевіряємо, чи залишився якийсь текст після видалення префікса
+ if decoded_chunk.strip():
yield decoded_chunk
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 06216ade..01490e2f 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -16,6 +16,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
models = [
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
+ 'Qwen/Qwen2.5-72B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'mistralai/Mistral-7B-Instruct-v0.3',
@@ -25,6 +26,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
model_aliases = {
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+ "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
diff --git a/g4f/models.py b/g4f/models.py
index f5980577..2f4405a3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -443,7 +443,7 @@ qwen_1_5_110b = Model(
qwen_2_72b = Model(
name = 'qwen-2-72b',
base_provider = 'Qwen',
- best_provider = IterListProvider([DeepInfraChat, Airforce])
+ best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
)
qwen_turbo = Model(