author     abc <98614666+xtekky@users.noreply.github.com>    2023-09-17 23:24:22 +0200
committer  abc <98614666+xtekky@users.noreply.github.com>    2023-09-17 23:24:22 +0200
commit     54f4f3904e018bb36b2d76a5a6880da69f6a324e (patch)
tree       c8906383c147dc9048f5db1debe46faf1e5418e4
parent     Update __init__.py (diff)
parent     Merge pull request #911 from xtekky/revert-907-fix-type-hint (diff)
31 files changed, 250 insertions, 38 deletions
@@ -145,7 +145,7 @@ import g4f
 
 print(g4f.Provider.Ails.params) # supported args
 
-# Automatic selection of Provider
+# Automatic selection of provider
 
 # streamed completion
 response = g4f.ChatCompletion.create(
@@ -166,7 +166,7 @@ response = g4f.ChatCompletion.create(
 
 print(response)
 
-# Set with Provider
+# Set with provider
 response = g4f.ChatCompletion.create(
     model="gpt-3.5-turbo",
     provider=g4f.Provider.DeepAi,
@@ -474,8 +474,8 @@ if __name__ == "__main__":
 
 to add another provider, its very simple:
 
-1. create a new file in [g4f/Provider](./g4f/Provider) with the name of the Provider
-2. Implement a class that extends [BaseProvider](./g4f/Provider/base_provider.py).
+1. create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
+2. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).
 
 ```py
 from .base_provider import BaseProvider
@@ -499,7 +499,7 @@ class HogeService(BaseProvider):
 
 3. Here, you can adjust the settings, for example if the website does support streaming, set `working` to `True`...
 4. Write code to request the provider in `create_completion` and `yield` the response, _even if_ its a one-time response, do not hesitate to look at other providers for inspiration
-5. Add the Provider Name in [g4f/provider/\_\_init__.py](./g4f/Provider/__init__.py)
+5. Add the Provider Name in [g4f/provider/**init**.py](./g4f/provider/__init__.py)
 
 ```py
 from .base_provider import BaseProvider
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 6aec2065..2e129896 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -70,4 +70,4 @@ class AItianhu(AsyncGeneratorProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index 9ead63d9..d533ae24 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -85,7 +85,7 @@ class Ails(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 
 def _hash(json_data: dict[str, str]) -> SHA256:
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index c65fb6f1..dbfc588d 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -75,4 +75,4 @@ class Aivvm(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index 9583dbef..2137d820 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -88,4 +88,4 @@ class Bard(AsyncProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index 6e596e2a..b98fe565 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -59,4 +59,4 @@ class ChatBase(AsyncGeneratorProvider):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 165cc5ca..8b868f8e 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -64,4 +64,4 @@ class ChatgptLogin(AsyncProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/CodeLinkAva.py b/g4f/Provider/CodeLinkAva.py
index 31a0fabb..3ab4e264 100644
--- a/g4f/Provider/CodeLinkAva.py
+++ b/g4f/Provider/CodeLinkAva.py
@@ -60,4 +60,4 @@ class CodeLinkAva(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/DfeHub.py b/g4f/Provider/DfeHub.py
index c1f4c059..d40e0380 100644
--- a/g4f/Provider/DfeHub.py
+++ b/g4f/Provider/DfeHub.py
@@ -74,4 +74,4 @@ class DfeHub(BaseProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 3b3b6a30..dae5196d 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -108,4 +108,4 @@ class EasyChat(BaseProvider):
             ("active_server", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index 7dfdce9a..261c53c0 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -78,4 +78,4 @@ class Equing(BaseProvider):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/FastGpt.py b/g4f/Provider/FastGpt.py
index e8893965..ef47f752 100644
--- a/g4f/Provider/FastGpt.py
+++ b/g4f/Provider/FastGpt.py
@@ -83,4 +83,4 @@ class FastGpt(ABC):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
index 6687eb93..b96efaac 100644
--- a/g4f/Provider/GetGpt.py
+++ b/g4f/Provider/GetGpt.py
@@ -66,7 +66,7 @@ class GetGpt(BaseProvider):
             ('max_tokens', 'int'),
         ]
         param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.Provider.{cls.__name__} supports: ({param})'
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
 
 
 def _encrypt(e: str):
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index fa837156..30090a58 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -98,4 +98,4 @@ class H2o(AsyncGeneratorProvider):
             ("return_full_text", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index ce21bfda..85f879f3 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -103,4 +103,4 @@ class HuggingChat(AsyncGeneratorProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index e9078651..33224d2e 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -88,4 +88,4 @@ class Liaobots(AsyncGeneratorProvider):
             ("auth", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Lockchat.py b/g4f/Provider/Lockchat.py
index c9db82f1..c15eec8d 100644
--- a/g4f/Provider/Lockchat.py
+++ b/g4f/Provider/Lockchat.py
@@ -61,4 +61,4 @@ class Lockchat(BaseProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/OpenAssistant.py b/g4f/Provider/OpenAssistant.py
index bef50ffb..3a931597 100644
--- a/g4f/Provider/OpenAssistant.py
+++ b/g4f/Provider/OpenAssistant.py
@@ -99,4 +99,4 @@ class OpenAssistant(AsyncGeneratorProvider):
             ("proxy", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
index 999d6247..cbe886f0 100644
--- a/g4f/Provider/OpenaiChat.py
+++ b/g4f/Provider/OpenaiChat.py
@@ -91,4 +91,4 @@ class OpenaiChat(AsyncProvider):
             ("cookies", "dict[str, str]")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/Raycast.py b/g4f/Provider/Raycast.py
index 9d5f3ac9..7ddc8acd 100644
--- a/g4f/Provider/Raycast.py
+++ b/g4f/Provider/Raycast.py
@@ -69,4 +69,4 @@ class Raycast(BaseProvider):
             ("auth", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
index 500837f7..72fce3ac 100644
--- a/g4f/Provider/Theb.py
+++ b/g4f/Provider/Theb.py
@@ -94,4 +94,4 @@ class Theb(BaseProvider):
             ("top_p", "int")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/V50.py b/g4f/Provider/V50.py
index 7b873979..81a95ba8 100644
--- a/g4f/Provider/V50.py
+++ b/g4f/Provider/V50.py
@@ -64,4 +64,4 @@ class V50(BaseProvider):
             ("top_p", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index 61e84409..31ad8b80 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -63,4 +63,4 @@ class Vitalentum(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py
index 25cacd3d..0a46f6ee 100644
--- a/g4f/Provider/Wuguokai.py
+++ b/g4f/Provider/Wuguokai.py
@@ -60,4 +60,4 @@ class Wuguokai(BaseProvider):
             ("stream", "bool")
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 111ba160..1986b6d3 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -73,4 +73,4 @@ class Ylokh(AsyncGeneratorProvider):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index d6bcc8e7..0f499c8c 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -34,7 +34,7 @@ class BaseProvider(ABC):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.Provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 
 _cookies = {}
diff --git a/g4f/__init__.py b/g4f/__init__.py
index d8681297..5dead0e9 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -21,8 +21,9 @@ class ChatCompletion:
         except KeyError:
             raise Exception(f'The model: {model} does not exist')
 
+
         if not provider:
-            if isinstance(model.best_provider, list):
+            if isinstance(model.best_provider, tuple):
                 provider = random.choice(model.best_provider)
             else:
                 provider = model.best_provider
diff --git a/g4f/models.py b/g4f/models.py
new file mode 100644
index 00000000..e095ce7e
--- /dev/null
+++ b/g4f/models.py
@@ -0,0 +1,212 @@
+from __future__ import annotations
+from dataclasses import dataclass
+from .Provider import BaseProvider, Bard, H2o, Vercel
+from .Provider import Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin, CodeLinkAva
+from .Provider import DeepAi, Vercel, Vitalentum, Ylokh, You, Yqcloud
+from .typing import Union
+
+@dataclass
+class Model:
+    name: str
+    base_provider: str
+    best_provider: Union[type[BaseProvider], tuple[type[BaseProvider]]] = None
+
+# Config for HuggingChat, OpenAssistant
+# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
+default = Model(
+    name="",
+    base_provider="huggingface"
+)
+
+# GPT-3.5 / GPT-4
+gpt_35_turbo = Model(
+    name = 'gpt-3.5-turbo',
+    base_provider = 'openai',
+    best_provider = (
+        Vercel, Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin,
+        CodeLinkAva, DeepAi, Vitalentum, Ylokh, You, Yqcloud
+    )
+)
+
+gpt_4 = Model(
+    name = 'gpt-4',
+    base_provider = 'openai',
+)
+
+# Bard
+palm = Model(
+    name = 'palm',
+    base_provider = 'google',
+    best_provider = Bard)
+
+# H2o
+falcon_7b = Model(
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+    base_provider = 'huggingface',
+    best_provider = H2o)
+
+falcon_40b = Model(
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+    base_provider = 'huggingface',
+    best_provider = H2o)
+
+llama_13b = Model(
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
+    base_provider = 'huggingface',
+    best_provider = H2o)
+
+# Vercel
+claude_instant_v1 = Model(
+    name = 'anthropic:claude-instant-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
+
+claude_v1 = Model(
+    name = 'anthropic:claude-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
+
+claude_v2 = Model(
+    name = 'anthropic:claude-v2',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
+
+command_light_nightly = Model(
+    name = 'cohere:command-light-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
+
+command_nightly = Model(
+    name = 'cohere:command-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
+
+gpt_neox_20b = Model(
+    name = 'huggingface:EleutherAI/gpt-neox-20b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+oasst_sft_1_pythia_12b = Model(
+    name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+oasst_sft_4_pythia_12b_epoch_35 = Model(
+    name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+santacoder = Model(
+    name = 'huggingface:bigcode/santacoder',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+bloom = Model(
+    name = 'huggingface:bigscience/bloom',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+flan_t5_xxl = Model(
+    name = 'huggingface:google/flan-t5-xxl',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
+
+code_davinci_002 = Model(
+    name = 'openai:code-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+gpt_35_turbo_16k = Model(
+    name = 'openai:gpt-3.5-turbo-16k',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+gpt_35_turbo_16k_0613 = Model(
+    name = 'openai:gpt-3.5-turbo-16k-0613',
+    base_provider = 'openai')
+
+gpt_4_0613 = Model(
+    name = 'openai:gpt-4-0613',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+text_ada_001 = Model(
+    name = 'openai:text-ada-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+text_babbage_001 = Model(
+    name = 'openai:text-babbage-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+text_curie_001 = Model(
+    name = 'openai:text-curie-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+text_davinci_002 = Model(
+    name = 'openai:text-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+text_davinci_003 = Model(
+    name = 'openai:text-davinci-003',
+    base_provider = 'openai',
+    best_provider = Vercel)
+
+llama13b_v2_chat = Model(
+    name = 'replicate:a16z-infra/llama13b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
+
+llama7b_v2_chat = Model(
+    name = 'replicate:a16z-infra/llama7b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
+
+
+class ModelUtils:
+    convert: dict[str, Model] = {
+        # GPT-3.5 / GPT-4
+        'gpt-3.5-turbo' : gpt_35_turbo,
+        'gpt-4' : gpt_4,
+
+        # Bard
+        'palm2' : palm,
+        'palm' : palm,
+        'google' : palm,
+        'google-bard' : palm,
+        'google-palm' : palm,
+        'bard' : palm,
+
+        # H2o
+        'falcon-40b' : falcon_40b,
+        'falcon-7b' : falcon_7b,
+        'llama-13b' : llama_13b,
+
+        # Vercel
+        'claude-instant-v1' : claude_instant_v1,
+        'claude-v1' : claude_v1,
+        'claude-v2' : claude_v2,
+        'command-nightly' : command_nightly,
+        'gpt-neox-20b' : gpt_neox_20b,
+        'santacoder' : santacoder,
+        'bloom' : bloom,
+        'flan-t5-xxl' : flan_t5_xxl,
+        'code-davinci-002' : code_davinci_002,
+        'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
+        'gpt-4-0613' : gpt_4_0613,
+        'text-ada-001' : text_ada_001,
+        'text-babbage-001' : text_babbage_001,
+        'text-curie-001' : text_curie_001,
+        'text-davinci-002' : text_davinci_002,
+        'text-davinci-003' : text_davinci_003,
+        'llama13b-v2-chat' : llama13b_v2_chat,
+        'llama7b-v2-chat' : llama7b_v2_chat,
+
+        'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b,
+        'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
+        'command-light-nightly' : command_light_nightly,
+        'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+    }
\ No newline at end of file
diff --git a/g4f/typing.py b/g4f/typing.py
index 2a9396a8..5f63c222 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -12,10 +12,9 @@ CreateResult = Generator[str, None, None]
 __all__ = [
     'Any',
     'AsyncGenerator',
-    'CreateResult',
     'Generator',
-    'SHA256',
     'Tuple',
     'TypedDict',
-    'Union',
+    'SHA256',
+    'CreateResult',
 ]
diff --git a/tool/provider_init.py b/tool/provider_init.py
index fac099ed..cd7f9333 100644
--- a/tool/provider_init.py
+++ b/tool/provider_init.py
@@ -3,13 +3,13 @@ from pathlib import Path
 
 def main():
     content = create_content()
-    with open("g4f/Provider/__init__.py", "w", encoding="utf-8") as f:
+    with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
         f.write(content)
 
 
 def create_content():
     path = Path()
-    paths = path.glob("g4f/Provider/*.py")
+    paths = path.glob("g4f/provider/*.py")
     paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
 
     classnames = [p.stem for p in paths]
diff --git a/tool/readme_table.py b/tool/readme_table.py
index 0598584d..9e43b0ae 100644
--- a/tool/readme_table.py
+++ b/tool/readme_table.py
@@ -81,7 +81,7 @@ def print_providers():
         netloc = urlparse(_provider.url).netloc
         website = f"[{netloc}]({_provider.url})"
 
-        provider_name = f"g4f.Provider.{_provider.__name__}"
+        provider_name = f"g4f.provider.{_provider.__name__}"
 
         has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
         has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
@@ -128,7 +128,7 @@ def print_models():
         name = re.split(r":|/", model.name)[-1]
         base_provider = base_provider_names[model.base_provider]
 
-        provider_name = f"g4f.Provider.{model.best_provider.__name__}"
+        provider_name = f"g4f.provider.{model.best_provider.__name__}"
         provider_url = provider_urls[model.best_provider.__name__]
         netloc = urlparse(provider_url).netloc
         website = f"[{netloc}]({provider_url})"
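The README hunks above change the contribution guide for adding a provider. For readers without the full README, here is a minimal sketch of the pattern those steps describe. The class name `HogeService` and the truncated `from .base_provider import BaseProvider` snippet come from the diff itself; the endpoint URL, the exact `create_completion` signature, and the `supports_stream` attribute are assumptions based on the `BaseProvider` conventions visible in this commit, not code from it.

```py
# Minimal sketch, assuming BaseProvider's interface; not the exact upstream code.
import requests

from .base_provider import BaseProvider
from ..typing import CreateResult


class HogeService(BaseProvider):
    url = "https://hoge.example"  # hypothetical endpoint
    working = True                # README step 3: set True once the site works
    supports_stream = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs,
    ) -> CreateResult:
        # README step 4: request the provider and yield the response,
        # even when it is a one-time (non-streamed) answer.
        response = requests.post(HogeService.url, json={"messages": messages})
        response.raise_for_status()
        yield response.text
```

Per README step 5, the class is then registered in `g4f/provider/__init__.py`, which `tool/provider_init.py` (also touched in this diff) regenerates from the files on disk.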
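The `g4f/__init__.py` hunk is the behavioral change of this merge: `best_provider` may now be a tuple of provider classes, as `g4f/models.py` defines for `gpt-3.5-turbo`, and `ChatCompletion.create` picks one at random when the caller does not pass a provider. A standalone sketch of that dispatch, with hypothetical `DummyA`/`DummyB` classes standing in for real providers:

```py
# Standalone sketch of the selection logic from the g4f/__init__.py hunk;
# DummyA and DummyB are hypothetical stand-ins for real provider classes.
import random


class DummyA: ...
class DummyB: ...


def pick_provider(best_provider, provider=None):
    if not provider:
        if isinstance(best_provider, tuple):
            # several candidates: spread requests by choosing one at random
            provider = random.choice(best_provider)
        else:
            provider = best_provider
    return provider


print(pick_provider((DummyA, DummyB)))  # randomly DummyA or DummyB
print(pick_provider(DummyA))            # always DummyA
```

The `list` to `tuple` switch in the `isinstance` check matches `models.py`, which builds `best_provider` as a tuple literal.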
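Finally, the new `g4f/models.py` maps user-facing model names to `Model` records via `ModelUtils.convert`. Based on the file contents in this diff, a lookup would behave as follows (a usage sketch, not code from the commit):

```py
from g4f import models

model = models.ModelUtils.convert['gpt-3.5-turbo']
print(model.name)           # 'gpt-3.5-turbo'
print(model.base_provider)  # 'openai'

# best_provider is a tuple here, so ChatCompletion.create
# will pick one of its entries at random.
print(len(model.best_provider))  # 12 candidate providers
```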