author    kqlio67 <kqlio67@users.noreply.github.com>  2024-10-15 10:28:33 +0200
committer kqlio67 <kqlio67@users.noreply.github.com>  2024-10-15 10:28:33 +0200
commit    dc9a6a9dc725430393244dd80eb0aaa858d83127 (patch)
tree      051a626eda49ddace3633e6bfb7bf7179e2df344 /docs
parent    refactor(docs): Update AsyncClient API documentation to reflect changes in API usage and add asyncio examples (diff)
Diffstat (limited to 'docs')
-rw-r--r--  docs/client.md | 56
1 file changed, 48 insertions, 8 deletions
diff --git a/docs/client.md b/docs/client.md
index a889443c..4273d9d9 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -1,3 +1,4 @@
+
### G4F - Client API
#### Introduction
@@ -33,7 +34,7 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
client = Client(
    provider=OpenaiChat,
    image_provider=Gemini,
-    ...
+    # Add any other necessary parameters
)
```
@@ -48,7 +49,7 @@ from g4f.client import Client
client = Client(
    api_key="...",
    proxies="http://user:pass@host",
-    ...
+    # Add any other necessary parameters
)
```
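As a side note: rather than hard-coding credentials as in the snippet above, the key can be read from the environment. A minimal sketch (not part of the original document; the variable name `G4F_API_KEY` is purely illustrative):

```python
import os

from g4f.client import Client

# Hypothetical: read the key from an environment variable instead of hard-coding it.
# "G4F_API_KEY" is an example name, not something the library looks up on its own.
client = Client(
    api_key=os.environ.get("G4F_API_KEY"),
    proxies="http://user:pass@host",
)
```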
@@ -59,10 +60,13 @@ client = Client(
You can use the `ChatCompletions` endpoint to generate text completions as follows:
```python
+from g4f.client import Client
+client = Client()
+
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
- ...
+ # Add any other necessary parameters
)
print(response.choices[0].message.content)
```
@@ -70,12 +74,16 @@ print(response.choices[0].message.content)
Streaming is also supported:
```python
+from g4f.client import Client
+
+client = Client()
+
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Say this is a test"}],
stream=True,
- ...
)
+
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content or "", end="")
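If the complete reply is needed after streaming, the printed deltas can also be accumulated. A small sketch that replaces the loop above (a stream can only be iterated once):

```python
# Accumulate the streamed deltas into one string while printing them as they arrive
full_reply = ""
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")
        full_reply += delta
print()
# full_reply now holds the complete assistant message
```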
@@ -86,13 +94,17 @@ for chunk in stream:
Generate images using a specified prompt:
```python
+from g4f.client import Client
+
+client = Client()
response = client.images.generate(
model="dall-e-3",
prompt="a white siamese cat",
- ...
+ # Add any other necessary parameters
)
image_url = response.data[0].url
+print(f"Generated image URL: {image_url}")
```
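If the result should be kept locally, the returned URL can be downloaded like any other file. A brief sketch using the standard `requests` package (the target filename is just an example):

```python
import requests

# Download the generated image and write it to disk;
# "generated_image.png" is an illustrative filename.
image_data = requests.get(image_url, timeout=60).content
with open("generated_image.png", "wb") as f:
    f.write(image_data)
```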
**Creating Image Variations:**
@@ -100,13 +112,17 @@ image_url = response.data[0].url
Create variations of an existing image:
```python
+from g4f.client import Client
+
+client = Client()
response = client.images.create_variation(
    image=open("cat.jpg", "rb"),
    model="bing",
-    ...
+    # Add any other necessary parameters
)
image_url = response.data[0].url
+print(f"Generated image URL: {image_url}")
```
Original / Variant:
@@ -120,6 +136,7 @@ from g4f.Provider import RetryProvider, Phind, FreeChatgpt, Liaobots
import g4f.debug
g4f.debug.logging = True
+g4f.debug.version_check = False
client = Client(
    provider=RetryProvider([Phind, FreeChatgpt, Liaobots], shuffle=False)
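The hunk above ends inside the `Client(...)` call; once constructed, the retry-wrapped client is used exactly like the plain client shown earlier, falling back to the next provider in the list when a request fails. A rough sketch (the empty `model` string is an assumption that lets the selected provider pick its default model):

```python
# Illustrative usage of the client configured with RetryProvider above
response = client.chat.completions.create(
    model="",  # assumption: leave empty so the provider chooses its default model
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```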
@@ -163,13 +180,36 @@ User: What are on this image?
Bot: There is a waterfall in the middle of a jungle. There is a rainbow over...
```
+### Example: Using a Vision Model
+
+The following code snippet demonstrates how to use a vision model to analyze an image and generate a description of its content. The example fetches an image, sends it to the model, and prints the response.
+
+```python
+import g4f
+import requests
+from g4f.client import Client
+
+image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
+# Or: image = open("docs/cat.jpeg", "rb")
+
+client = Client()
+response = client.chat.completions.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "What are on this image?"}],
+    provider=g4f.Provider.Bing,
+    image=image,
+    # Add any other necessary parameters
+)
+print(response.choices[0].message.content)
+```
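Since the parent commit updates the AsyncClient documentation, an asynchronous variant of the same request might look like the sketch below. It assumes `AsyncClient` mirrors the synchronous `chat.completions.create` interface and that the call is awaitable:

```python
import asyncio

import g4f
from g4f.client import AsyncClient  # assumed to mirror the synchronous Client API


async def main():
    client = AsyncClient()
    # Assumption: the async client exposes the same create() signature, awaited
    response = await client.chat.completions.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```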
+
#### Advanced example: A command-line program
```python
import g4f
from g4f.client import Client
# Initialize the GPT client with the desired provider
-client = Client(provider=g4f.Provider.Bing)
+client = Client()
# Initialize an empty conversation history
messages = []
@@ -203,4 +243,4 @@ while True:
print(f"An error occurred: {e}")
```
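The hunk above only shows fragments of the program. A hedged reconstruction of the complete loop, based on the pieces visible in this diff (details such as the `exit` command and the history handling are assumptions), could look like this:

```python
import g4f
from g4f.client import Client

# Initialize the GPT client with the default provider
client = Client()

# Initialize an empty conversation history
messages = []

while True:
    # Read user input; typing "exit" ends the session (assumed quit command)
    user_input = input("You: ")
    if user_input.strip().lower() == "exit":
        break

    messages.append({"role": "user", "content": user_input})

    try:
        response = client.chat.completions.create(
            model=g4f.models.default,
            messages=messages,
        )
        reply = response.choices[0].message.content
        print(f"Bot: {reply}")
        # Keep the assistant reply so follow-up questions have context
        messages.append({"role": "assistant", "content": reply})
    except Exception as e:
        print(f"An error occurred: {e}")
```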
-[Return to Home](/) \ No newline at end of file
+[Return to Home](/)