diff options
author | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-25 18:43:55 +0200 |
---|---|---|
committer | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-25 18:43:55 +0200 |
commit | f55f867a01b279992470d992fae55cd2e559a9ea (patch) | |
tree | 06ad750dd9cc9d6e5cea933cce297890a637ed64 /g4f | |
parent | refactor(g4f/api/__init__.py): refactor API structure and improve async handling (diff) | |
download | gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar.gz gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar.bz2 gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar.lz gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar.xz gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.tar.zst gpt4free-f55f867a01b279992470d992fae55cd2e559a9ea.zip |
Diffstat (limited to 'g4f')
-rw-r--r-- | g4f/client/client.py | 30 |
1 file changed, 26 insertions, 4 deletions
diff --git a/g4f/client/client.py b/g4f/client/client.py index 41238df5..2772f9bb 100644 --- a/g4f/client/client.py +++ b/g4f/client/client.py @@ -149,6 +149,7 @@ class Completions: self, messages: Messages, model: str, + system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -161,6 +162,12 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: + # If a system prompt is provided, prepend it to the messages + if system: + system_message = {"role": "system", "content": system} + messages = [system_message] + messages + + # Existing implementation continues... model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -221,6 +228,7 @@ class Completions: self, messages: Messages, model: str, + system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -233,6 +241,12 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]: + # If a system prompt is provided, prepend it to the messages + if system: + system_message = {"role": "system", "content": system} + messages = [system_message] + messages + + # Existing implementation continues... 
model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -271,16 +285,18 @@ class Completions: **kwargs ) - # Removed 'await' here since 'async_iter_response' returns an async generator - response = async_iter_response(response, stream, response_format, max_tokens, stop) - response = async_iter_append_model_and_provider(response) - + # Handle streaming or non-streaming responses if stream: + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) return response else: + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) async for result in response: return result + class Chat: completions: Completions @@ -401,6 +417,12 @@ class Image: def __repr__(self): return f"Image(url={self.url}, b64_json={'<base64 data>' if self.b64_json else None})" + def to_dict(self): + return { + "url": self.url, + "b64_json": self.b64_json + } + class ImagesResponse: def __init__(self, data: list[Image]): self.data = data |