mirror of https://github.com/open-webui/open-webui

Added timeout setting for ollama streaming response

parent bdd2ac0015 · commit e130ad74d1
@@ -18,6 +18,10 @@ If you're experiencing connection issues, it’s often due to the WebUI docker c
 docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
 ```
 
+### Error on Slow Responses for Ollama
+
+Open WebUI has a default timeout of 15 minutes for Ollama to finish generating the response. If needed, this can be adjusted via the environment variable OLLAMA_GENERATE_TIMEOUT, which sets the timeout in seconds.
+
 ### General Connection Errors
 
 **Ensure Ollama Version is Up-to-Date**: Always start by checking that you have the latest version of Ollama. Visit [Ollama's official site](https://ollama.com/) for the latest updates.
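
For reference, the 15-minute default mentioned above corresponds to the 900-second fallback added to the backend config later in this commit. A minimal, illustrative sketch of how such a value is resolved from the environment (the snippet is not part of the commit; only the OLLAMA_GENERATE_TIMEOUT name comes from it):

```python
import os

# Read the timeout in whole seconds; the fallback of 900 s equals 15 minutes.
timeout_seconds = int(os.environ.get("OLLAMA_GENERATE_TIMEOUT", "900"))
print(f"Ollama generate timeout: {timeout_seconds} s ({timeout_seconds / 60:.1f} min)")
```
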
@@ -46,6 +46,7 @@ from config import (
     SRC_LOG_LEVELS,
     OLLAMA_BASE_URLS,
     ENABLE_OLLAMA_API,
+    OLLAMA_GENERATE_TIMEOUT,
     ENABLE_MODEL_FILTER,
     MODEL_FILTER_LIST,
     UPLOAD_DIR,
@@ -154,7 +155,7 @@ async def cleanup_response(
 async def post_streaming_url(url: str, payload: str):
     r = None
     try:
-        session = aiohttp.ClientSession(trust_env=True)
+        session = aiohttp.ClientSession(trust_env=True, timeout=aiohttp.ClientTimeout(total=OLLAMA_GENERATE_TIMEOUT))
         r = await session.post(url, data=payload)
         r.raise_for_status()
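
For context, a minimal, self-contained sketch of what this change does at the aiohttp level: the total= budget covers connecting, sending the request, and reading the whole streamed response, so a generation that runs past it surfaces as asyncio.TimeoutError. This is an illustration only; the endpoint URL, model name, and payload are placeholders for a local Ollama instance, not taken from the commit.

```python
import asyncio
import json

import aiohttp

OLLAMA_GENERATE_TIMEOUT = 900  # seconds; mirrors the new config default


async def stream_generate(url: str, payload: dict) -> None:
    # total= bounds the whole request, including reading the streamed body.
    timeout = aiohttp.ClientTimeout(total=OLLAMA_GENERATE_TIMEOUT)
    async with aiohttp.ClientSession(trust_env=True, timeout=timeout) as session:
        try:
            async with session.post(url, data=json.dumps(payload)) as r:
                r.raise_for_status()
                async for chunk in r.content:
                    print(chunk.decode(errors="replace"), end="")
        except asyncio.TimeoutError:
            # Raised when Ollama has not finished within the total timeout.
            print("\nOllama did not respond within OLLAMA_GENERATE_TIMEOUT seconds")


if __name__ == "__main__":
    asyncio.run(
        stream_generate(
            "http://127.0.0.1:11434/api/generate",
            {"model": "llama3", "prompt": "Why is the sky blue?", "stream": True},
        )
    )
```
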
@@ -425,6 +425,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
 )
 
 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
+OLLAMA_GENERATE_TIMEOUT = int(os.environ.get("OLLAMA_GENERATE_TIMEOUT", "900"))
 K8S_FLAG = os.environ.get("K8S_FLAG", "")
 USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")