| author | kqlio67 <166700875+kqlio67@users.noreply.github.com> | 2024-10-15 13:10:12 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-10-15 13:10:12 +0200 |
| commit | 069b6cebdd541fda7d90d6116269729951893504 (patch) | |
| tree | 0aa33cb21009ff3d85085ff81c235a205fed0121 | |
| parent | feat(README.md): update documentation and add new features (diff) | |
| parent | Merge pull request #2275 from hansipie/setollamahost (diff) | |
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | README.md | 27 |
| -rw-r--r-- | docker-compose.yml | 4 |
| -rw-r--r-- | g4f/Provider/Ollama.py | 13 |
3 files changed, 40 insertions, 4 deletions
```diff
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -747,6 +747,33 @@ set G4F_PROXY=http://host:port
     </a>
   </td>
 </tr>
+<tr>
+  <td>
+    <a href="https://github.com/yjg30737/pyqt-openai">
+      <b>VividNode (pyqt-openai)</b>
+    </a>
+  </td>
+  <td>
+    <a href="https://github.com/yjg30737/pyqt-openai/stargazers">
+      <img alt="Stars" src="https://img.shields.io/github/stars/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+    </a>
+  </td>
+  <td>
+    <a href="https://github.com/yjg30737/pyqt-openai/network/members">
+      <img alt="Forks" src="https://img.shields.io/github/forks/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+    </a>
+  </td>
+  <td>
+    <a href="https://github.com/yjg30737/pyqt-openai/issues">
+      <img alt="Issues" src="https://img.shields.io/github/issues/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+    </a>
+  </td>
+  <td>
+    <a href="https://github.com/yjg30737/pyqt-openai/pulls">
+      <img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+    </a>
+  </td>
+</tr>
 </tbody>
 </table>
 
diff --git a/docker-compose.yml b/docker-compose.yml
index 1b99ba97..3f8bc4ea 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -12,4 +12,6 @@ services:
     ports:
       - '8080:8080'
       - '1337:1337'
-      - '7900:7900'
\ No newline at end of file
+      - '7900:7900'
+    environment:
+      - OLLAMA_HOST=host.docker.internal
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/Ollama.py
index a44aaacd..f9116541 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/Ollama.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import requests
+import os
 
 from .needs_auth.Openai import Openai
 from ..typing import AsyncResult, Messages
@@ -14,9 +15,11 @@ class Ollama(Openai):
     @classmethod
     def get_models(cls):
         if not cls.models:
-            url = 'http://127.0.0.1:11434/api/tags'
+            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            url = f"http://{host}:{port}/api/tags"
             models = requests.get(url).json()["models"]
-            cls.models = [model['name'] for model in models]
+            cls.models = [model["name"] for model in models]
             cls.default_model = cls.models[0]
         return cls.models
 
@@ -25,9 +28,13 @@
         cls,
         model: str,
         messages: Messages,
-        api_base: str = "http://localhost:11434/v1",
+        api_base: str = None,
         **kwargs
     ) -> AsyncResult:
+        if not api_base:
+            host = os.getenv("OLLAMA_HOST", "localhost")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            api_base: str = f"http://{host}:{port}/v1"
        return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
         )
\ No newline at end of file
```
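
Usage note for this change: `OLLAMA_HOST` and `OLLAMA_PORT` are read at call time, once by `get_models()` when listing models via `/api/tags` and once by `create_async_generator()` when no explicit `api_base` is supplied, so they must be set before the provider is first used. The sketch below is a minimal illustration only; the LAN address is hypothetical, and the `g4f.ChatCompletion.create` entry point and the `from g4f.Provider import Ollama` import path are assumed from the surrounding project rather than shown in this commit.

```python
# Minimal sketch (not part of this commit): exercising the new
# OLLAMA_HOST / OLLAMA_PORT fallback in the Ollama provider.
import os

import g4f
from g4f.Provider import Ollama

# Must be set before the provider is first used: get_models() and
# create_async_generator() read these variables at call time.
os.environ["OLLAMA_HOST"] = "192.168.1.50"   # hypothetical remote Ollama box
os.environ["OLLAMA_PORT"] = "11434"          # Ollama's default port

response = g4f.ChatCompletion.create(
    model=Ollama.get_models()[0],            # first model reported by /api/tags
    provider=Ollama,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```

Because `api_base` now defaults to `None`, an explicit endpoint can still be pinned by passing `api_base="http://<host>:11434/v1"` through as a keyword argument. The docker-compose addition follows the same pattern: it sets `OLLAMA_HOST=host.docker.internal` so that, where that name resolves (e.g. Docker Desktop), the containerized app reaches the Ollama daemon running on the Docker host.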