mirror of https://github.com/open-webui/open-webui
Merge remote-tracking branch 'upstream/dev' into docling_context_extraction_engine
merge upstream
This commit is contained in:
commit
0aa42615f9
|
@ -3,6 +3,7 @@ import logging
|
|||
import os
|
||||
import shutil
|
||||
import base64
|
||||
import redis
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
@ -17,6 +18,7 @@ from open_webui.env import (
|
|||
DATA_DIR,
|
||||
DATABASE_URL,
|
||||
ENV,
|
||||
REDIS_URL,
|
||||
FRONTEND_BUILD_DIR,
|
||||
OFFLINE_MODE,
|
||||
OPEN_WEBUI_DIR,
|
||||
|
@ -248,9 +250,14 @@ class PersistentConfig(Generic[T]):
|
|||
|
||||
class AppConfig:
|
||||
_state: dict[str, PersistentConfig]
|
||||
_redis: Optional[redis.Redis] = None
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, redis_url: Optional[str] = None):
|
||||
super().__setattr__("_state", {})
|
||||
if redis_url:
|
||||
super().__setattr__(
|
||||
"_redis", redis.Redis.from_url(redis_url, decode_responses=True)
|
||||
)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
if isinstance(value, PersistentConfig):
|
||||
|
@ -259,7 +266,31 @@ class AppConfig:
|
|||
self._state[key].value = value
|
||||
self._state[key].save()
|
||||
|
||||
if self._redis:
|
||||
redis_key = f"open-webui:config:{key}"
|
||||
self._redis.set(redis_key, json.dumps(self._state[key].value))
|
||||
|
||||
def __getattr__(self, key):
|
||||
if key not in self._state:
|
||||
raise AttributeError(f"Config key '{key}' not found")
|
||||
|
||||
# If Redis is available, check for an updated value
|
||||
if self._redis:
|
||||
redis_key = f"open-webui:config:{key}"
|
||||
redis_value = self._redis.get(redis_key)
|
||||
|
||||
if redis_value is not None:
|
||||
try:
|
||||
decoded_value = json.loads(redis_value)
|
||||
|
||||
# Update the in-memory value if different
|
||||
if self._state[key].value != decoded_value:
|
||||
self._state[key].value = decoded_value
|
||||
log.info(f"Updated {key} from Redis: {decoded_value}")
|
||||
|
||||
except json.JSONDecodeError:
|
||||
log.error(f"Invalid JSON format in Redis for {key}: {redis_value}")
|
||||
|
||||
return self._state[key].value
|
||||
|
||||
|
||||
|
@ -1956,6 +1987,12 @@ TAVILY_API_KEY = PersistentConfig(
|
|||
os.getenv("TAVILY_API_KEY", ""),
|
||||
)
|
||||
|
||||
TAVILY_EXTRACT_DEPTH = PersistentConfig(
|
||||
"TAVILY_EXTRACT_DEPTH",
|
||||
"rag.web.search.tavily_extract_depth",
|
||||
os.getenv("TAVILY_EXTRACT_DEPTH", "basic"),
|
||||
)
|
||||
|
||||
JINA_API_KEY = PersistentConfig(
|
||||
"JINA_API_KEY",
|
||||
"rag.web.search.jina_api_key",
|
||||
|
|
|
@ -330,7 +330,7 @@ ENABLE_REALTIME_CHAT_SAVE = (
|
|||
# REDIS
|
||||
####################################
|
||||
|
||||
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
|
||||
REDIS_URL = os.environ.get("REDIS_URL", "")
|
||||
|
||||
####################################
|
||||
# WEBUI_AUTH (Required for security)
|
||||
|
|
|
@ -213,6 +213,7 @@ from open_webui.config import (
|
|||
SERPSTACK_API_KEY,
|
||||
SERPSTACK_HTTPS,
|
||||
TAVILY_API_KEY,
|
||||
TAVILY_EXTRACT_DEPTH,
|
||||
BING_SEARCH_V7_ENDPOINT,
|
||||
BING_SEARCH_V7_SUBSCRIPTION_KEY,
|
||||
BRAVE_SEARCH_API_KEY,
|
||||
|
@ -313,6 +314,7 @@ from open_webui.env import (
|
|||
AUDIT_EXCLUDED_PATHS,
|
||||
AUDIT_LOG_LEVEL,
|
||||
CHANGELOG,
|
||||
REDIS_URL,
|
||||
GLOBAL_LOG_LEVEL,
|
||||
MAX_BODY_LOG_SIZE,
|
||||
SAFE_MODE,
|
||||
|
@ -419,7 +421,7 @@ app = FastAPI(
|
|||
|
||||
oauth_manager = OAuthManager(app)
|
||||
|
||||
app.state.config = AppConfig()
|
||||
app.state.config = AppConfig(redis_url=REDIS_URL)
|
||||
|
||||
app.state.WEBUI_NAME = WEBUI_NAME
|
||||
app.state.LICENSE_METADATA = None
|
||||
|
@ -616,6 +618,7 @@ app.state.config.RAG_WEB_SEARCH_TRUST_ENV = RAG_WEB_SEARCH_TRUST_ENV
|
|||
app.state.config.PLAYWRIGHT_WS_URI = PLAYWRIGHT_WS_URI
|
||||
app.state.config.FIRECRAWL_API_BASE_URL = FIRECRAWL_API_BASE_URL
|
||||
app.state.config.FIRECRAWL_API_KEY = FIRECRAWL_API_KEY
|
||||
app.state.config.TAVILY_EXTRACT_DEPTH = TAVILY_EXTRACT_DEPTH
|
||||
|
||||
app.state.EMBEDDING_FUNCTION = None
|
||||
app.state.ef = None
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
import requests
|
||||
import logging
|
||||
from typing import Iterator, List, Literal, Union
|
||||
|
||||
from langchain_core.document_loaders import BaseLoader
|
||||
from langchain_core.documents import Document
|
||||
from open_webui.env import SRC_LOG_LEVELS
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(SRC_LOG_LEVELS["RAG"])
|
||||
|
||||
class TavilyLoader(BaseLoader):
|
||||
"""Extract web page content from URLs using Tavily Extract API.
|
||||
|
||||
This is a LangChain document loader that uses Tavily's Extract API to
|
||||
retrieve content from web pages and return it as Document objects.
|
||||
|
||||
Args:
|
||||
urls: URL or list of URLs to extract content from.
|
||||
api_key: The Tavily API key.
|
||||
extract_depth: Depth of extraction, either "basic" or "advanced".
|
||||
continue_on_failure: Whether to continue if extraction of a URL fails.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
urls: Union[str, List[str]],
|
||||
api_key: str,
|
||||
extract_depth: Literal["basic", "advanced"] = "basic",
|
||||
continue_on_failure: bool = True,
|
||||
) -> None:
|
||||
"""Initialize Tavily Extract client.
|
||||
|
||||
Args:
|
||||
urls: URL or list of URLs to extract content from.
|
||||
api_key: The Tavily API key.
|
||||
include_images: Whether to include images in the extraction.
|
||||
extract_depth: Depth of extraction, either "basic" or "advanced".
|
||||
advanced extraction retrieves more data, including tables and
|
||||
embedded content, with higher success but may increase latency.
|
||||
basic costs 1 credit per 5 successful URL extractions,
|
||||
advanced costs 2 credits per 5 successful URL extractions.
|
||||
continue_on_failure: Whether to continue if extraction of a URL fails.
|
||||
"""
|
||||
if not urls:
|
||||
raise ValueError("At least one URL must be provided.")
|
||||
|
||||
self.api_key = api_key
|
||||
self.urls = urls if isinstance(urls, list) else [urls]
|
||||
self.extract_depth = extract_depth
|
||||
self.continue_on_failure = continue_on_failure
|
||||
self.api_url = "https://api.tavily.com/extract"
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""Extract and yield documents from the URLs using Tavily Extract API."""
|
||||
batch_size = 20
|
||||
for i in range(0, len(self.urls), batch_size):
|
||||
batch_urls = self.urls[i:i + batch_size]
|
||||
try:
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": f"Bearer {self.api_key}"
|
||||
}
|
||||
# Use string for single URL, array for multiple URLs
|
||||
urls_param = batch_urls[0] if len(batch_urls) == 1 else batch_urls
|
||||
payload = {
|
||||
"urls": urls_param,
|
||||
"extract_depth": self.extract_depth
|
||||
}
|
||||
# Make the API call
|
||||
response = requests.post(
|
||||
self.api_url,
|
||||
headers=headers,
|
||||
json=payload
|
||||
)
|
||||
response.raise_for_status()
|
||||
response_data = response.json()
|
||||
# Process successful results
|
||||
for result in response_data.get("results", []):
|
||||
url = result.get("url", "")
|
||||
content = result.get("raw_content", "")
|
||||
if not content:
|
||||
log.warning(f"No content extracted from {url}")
|
||||
continue
|
||||
# Add URLs as metadata
|
||||
metadata = {"source": url}
|
||||
yield Document(
|
||||
page_content=content,
|
||||
metadata=metadata,
|
||||
)
|
||||
for failed in response_data.get("failed_results", []):
|
||||
url = failed.get("url", "")
|
||||
error = failed.get("error", "Unknown error")
|
||||
log.error(f"Failed to extract content from {url}: {error}")
|
||||
except Exception as e:
|
||||
if self.continue_on_failure:
|
||||
log.error(f"Error extracting content from batch {batch_urls}: {e}")
|
||||
else:
|
||||
raise e
|
|
@ -1,4 +1,5 @@
|
|||
from opensearchpy import OpenSearch
|
||||
from opensearchpy.helpers import bulk
|
||||
from typing import Optional
|
||||
|
||||
from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
|
||||
|
@ -20,8 +21,14 @@ class OpenSearchClient:
|
|||
verify_certs=OPENSEARCH_CERT_VERIFY,
|
||||
http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD),
|
||||
)
|
||||
|
||||
def _get_index_name(self, collection_name: str) -> str:
|
||||
return f"{self.index_prefix}_{collection_name}"
|
||||
|
||||
def _result_to_get_result(self, result) -> GetResult:
|
||||
if not result["hits"]["hits"]:
|
||||
return None
|
||||
|
||||
ids = []
|
||||
documents = []
|
||||
metadatas = []
|
||||
|
@ -31,9 +38,12 @@ class OpenSearchClient:
|
|||
documents.append(hit["_source"].get("text"))
|
||||
metadatas.append(hit["_source"].get("metadata"))
|
||||
|
||||
return GetResult(ids=ids, documents=documents, metadatas=metadatas)
|
||||
return GetResult(ids=[ids], documents=[documents], metadatas=[metadatas])
|
||||
|
||||
def _result_to_search_result(self, result) -> SearchResult:
|
||||
if not result["hits"]["hits"]:
|
||||
return None
|
||||
|
||||
ids = []
|
||||
distances = []
|
||||
documents = []
|
||||
|
@ -46,25 +56,32 @@ class OpenSearchClient:
|
|||
metadatas.append(hit["_source"].get("metadata"))
|
||||
|
||||
return SearchResult(
|
||||
ids=ids, distances=distances, documents=documents, metadatas=metadatas
|
||||
ids=[ids], distances=[distances], documents=[documents], metadatas=[metadatas]
|
||||
)
|
||||
|
||||
def _create_index(self, collection_name: str, dimension: int):
|
||||
body = {
|
||||
"settings": {
|
||||
"index": {
|
||||
"knn": True
|
||||
}
|
||||
},
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"id": {"type": "keyword"},
|
||||
"vector": {
|
||||
"type": "dense_vector",
|
||||
"dims": dimension, # Adjust based on your vector dimensions
|
||||
"index": true,
|
||||
"type": "knn_vector",
|
||||
"dimension": dimension, # Adjust based on your vector dimensions
|
||||
"index": True,
|
||||
"similarity": "faiss",
|
||||
"method": {
|
||||
"name": "hnsw",
|
||||
"space_type": "ip", # Use inner product to approximate cosine similarity
|
||||
"space_type": "innerproduct", # Use inner product to approximate cosine similarity
|
||||
"engine": "faiss",
|
||||
"ef_construction": 128,
|
||||
"m": 16,
|
||||
"parameters": {
|
||||
"ef_construction": 128,
|
||||
"m": 16,
|
||||
}
|
||||
},
|
||||
},
|
||||
"text": {"type": "text"},
|
||||
|
@ -73,7 +90,7 @@ class OpenSearchClient:
|
|||
}
|
||||
}
|
||||
self.client.indices.create(
|
||||
index=f"{self.index_prefix}_{collection_name}", body=body
|
||||
index=self._get_index_name(collection_name), body=body
|
||||
)
|
||||
|
||||
def _create_batches(self, items: list[VectorItem], batch_size=100):
|
||||
|
@ -84,38 +101,49 @@ class OpenSearchClient:
|
|||
# has_collection here means has index.
|
||||
# We are simply adapting to the norms of the other DBs.
|
||||
return self.client.indices.exists(
|
||||
index=f"{self.index_prefix}_{collection_name}"
|
||||
index=self._get_index_name(collection_name)
|
||||
)
|
||||
|
||||
def delete_colleciton(self, collection_name: str):
|
||||
def delete_collection(self, collection_name: str):
|
||||
# delete_collection here means delete index.
|
||||
# We are simply adapting to the norms of the other DBs.
|
||||
self.client.indices.delete(index=f"{self.index_prefix}_{collection_name}")
|
||||
self.client.indices.delete(index=self._get_index_name(collection_name))
|
||||
|
||||
def search(
|
||||
self, collection_name: str, vectors: list[list[float]], limit: int
|
||||
self, collection_name: str, vectors: list[list[float | int]], limit: int
|
||||
) -> Optional[SearchResult]:
|
||||
query = {
|
||||
"size": limit,
|
||||
"_source": ["text", "metadata"],
|
||||
"query": {
|
||||
"script_score": {
|
||||
"query": {"match_all": {}},
|
||||
"script": {
|
||||
"source": "cosineSimilarity(params.vector, 'vector') + 1.0",
|
||||
"params": {
|
||||
"vector": vectors[0]
|
||||
}, # Assuming single query vector
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
try:
|
||||
if not self.has_collection(collection_name):
|
||||
return None
|
||||
|
||||
query = {
|
||||
"size": limit,
|
||||
"_source": ["text", "metadata"],
|
||||
"query": {
|
||||
"script_score": {
|
||||
"query": {
|
||||
"match_all": {}
|
||||
},
|
||||
"script": {
|
||||
"source": "cosineSimilarity(params.query_value, doc[params.field]) + 1.0",
|
||||
"params": {
|
||||
"field": "vector",
|
||||
"query_value": vectors[0]
|
||||
}, # Assuming single query vector
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
result = self.client.search(
|
||||
index=self._get_index_name(collection_name),
|
||||
body=query
|
||||
)
|
||||
|
||||
result = self.client.search(
|
||||
index=f"{self.index_prefix}_{collection_name}", body=query
|
||||
)
|
||||
|
||||
return self._result_to_search_result(result)
|
||||
return self._result_to_search_result(result)
|
||||
|
||||
except Exception as e:
|
||||
return None
|
||||
|
||||
def query(
|
||||
self, collection_name: str, filter: dict, limit: Optional[int] = None
|
||||
|
@ -124,18 +152,26 @@ class OpenSearchClient:
|
|||
return None
|
||||
|
||||
query_body = {
|
||||
"query": {"bool": {"filter": []}},
|
||||
"query": {
|
||||
"bool": {
|
||||
"filter": []
|
||||
}
|
||||
},
|
||||
"_source": ["text", "metadata"],
|
||||
}
|
||||
|
||||
for field, value in filter.items():
|
||||
query_body["query"]["bool"]["filter"].append({"term": {field: value}})
|
||||
query_body["query"]["bool"]["filter"].append({
|
||||
"match": {
|
||||
"metadata." + str(field): value
|
||||
}
|
||||
})
|
||||
|
||||
size = limit if limit else 10
|
||||
|
||||
try:
|
||||
result = self.client.search(
|
||||
index=f"{self.index_prefix}_{collection_name}",
|
||||
index=self._get_index_name(collection_name),
|
||||
body=query_body,
|
||||
size=size,
|
||||
)
|
||||
|
@ -146,14 +182,14 @@ class OpenSearchClient:
|
|||
return None
|
||||
|
||||
def _create_index_if_not_exists(self, collection_name: str, dimension: int):
|
||||
if not self.has_index(collection_name):
|
||||
if not self.has_collection(collection_name):
|
||||
self._create_index(collection_name, dimension)
|
||||
|
||||
def get(self, collection_name: str) -> Optional[GetResult]:
|
||||
query = {"query": {"match_all": {}}, "_source": ["text", "metadata"]}
|
||||
|
||||
result = self.client.search(
|
||||
index=f"{self.index_prefix}_{collection_name}", body=query
|
||||
index=self._get_index_name(collection_name), body=query
|
||||
)
|
||||
return self._result_to_get_result(result)
|
||||
|
||||
|
@ -165,18 +201,18 @@ class OpenSearchClient:
|
|||
for batch in self._create_batches(items):
|
||||
actions = [
|
||||
{
|
||||
"index": {
|
||||
"_id": item["id"],
|
||||
"_source": {
|
||||
"vector": item["vector"],
|
||||
"text": item["text"],
|
||||
"metadata": item["metadata"],
|
||||
},
|
||||
}
|
||||
"_op_type": "index",
|
||||
"_index": self._get_index_name(collection_name),
|
||||
"_id": item["id"],
|
||||
"_source": {
|
||||
"vector": item["vector"],
|
||||
"text": item["text"],
|
||||
"metadata": item["metadata"],
|
||||
},
|
||||
}
|
||||
for item in batch
|
||||
]
|
||||
self.client.bulk(actions)
|
||||
bulk(self.client, actions)
|
||||
|
||||
def upsert(self, collection_name: str, items: list[VectorItem]):
|
||||
self._create_index_if_not_exists(
|
||||
|
@ -186,27 +222,47 @@ class OpenSearchClient:
|
|||
for batch in self._create_batches(items):
|
||||
actions = [
|
||||
{
|
||||
"index": {
|
||||
"_id": item["id"],
|
||||
"_index": f"{self.index_prefix}_{collection_name}",
|
||||
"_source": {
|
||||
"vector": item["vector"],
|
||||
"text": item["text"],
|
||||
"metadata": item["metadata"],
|
||||
},
|
||||
}
|
||||
"_op_type": "update",
|
||||
"_index": self._get_index_name(collection_name),
|
||||
"_id": item["id"],
|
||||
"doc": {
|
||||
"vector": item["vector"],
|
||||
"text": item["text"],
|
||||
"metadata": item["metadata"],
|
||||
},
|
||||
"doc_as_upsert": True,
|
||||
}
|
||||
for item in batch
|
||||
]
|
||||
self.client.bulk(actions)
|
||||
|
||||
def delete(self, collection_name: str, ids: list[str]):
|
||||
actions = [
|
||||
{"delete": {"_index": f"{self.index_prefix}_{collection_name}", "_id": id}}
|
||||
for id in ids
|
||||
]
|
||||
self.client.bulk(body=actions)
|
||||
bulk(self.client, actions)
|
||||
|
||||
def delete(self, collection_name: str, ids: Optional[list[str]] = None, filter: Optional[dict] = None):
|
||||
if ids:
|
||||
actions = [
|
||||
{
|
||||
"_op_type": "delete",
|
||||
"_index": self._get_index_name(collection_name),
|
||||
"_id": id,
|
||||
}
|
||||
for id in ids
|
||||
]
|
||||
bulk(self.client, actions)
|
||||
elif filter:
|
||||
query_body = {
|
||||
"query": {
|
||||
"bool": {
|
||||
"filter": []
|
||||
}
|
||||
},
|
||||
}
|
||||
for field, value in filter.items():
|
||||
query_body["query"]["bool"]["filter"].append({
|
||||
"match": {
|
||||
"metadata." + str(field): value
|
||||
}
|
||||
})
|
||||
self.client.delete_by_query(index=self._get_index_name(collection_name), body=query_body)
|
||||
|
||||
def reset(self):
|
||||
indices = self.client.indices.get(index=f"{self.index_prefix}_*")
|
||||
for index in indices:
|
||||
|
|
|
@ -24,6 +24,7 @@ from langchain_community.document_loaders import PlaywrightURLLoader, WebBaseLoa
|
|||
from langchain_community.document_loaders.firecrawl import FireCrawlLoader
|
||||
from langchain_community.document_loaders.base import BaseLoader
|
||||
from langchain_core.documents import Document
|
||||
from open_webui.retrieval.loaders.tavily import TavilyLoader
|
||||
from open_webui.constants import ERROR_MESSAGES
|
||||
from open_webui.config import (
|
||||
ENABLE_RAG_LOCAL_WEB_FETCH,
|
||||
|
@ -31,6 +32,8 @@ from open_webui.config import (
|
|||
RAG_WEB_LOADER_ENGINE,
|
||||
FIRECRAWL_API_BASE_URL,
|
||||
FIRECRAWL_API_KEY,
|
||||
TAVILY_API_KEY,
|
||||
TAVILY_EXTRACT_DEPTH,
|
||||
)
|
||||
from open_webui.env import SRC_LOG_LEVELS
|
||||
|
||||
|
@ -113,7 +116,47 @@ def verify_ssl_cert(url: str) -> bool:
|
|||
return False
|
||||
|
||||
|
||||
class SafeFireCrawlLoader(BaseLoader):
|
||||
class RateLimitMixin:
|
||||
async def _wait_for_rate_limit(self):
|
||||
"""Wait to respect the rate limit if specified."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
await asyncio.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
|
||||
def _sync_wait_for_rate_limit(self):
|
||||
"""Synchronous version of rate limit wait."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
time.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
|
||||
|
||||
class URLProcessingMixin:
|
||||
def _verify_ssl_cert(self, url: str) -> bool:
|
||||
"""Verify SSL certificate for a URL."""
|
||||
return verify_ssl_cert(url)
|
||||
|
||||
async def _safe_process_url(self, url: str) -> bool:
|
||||
"""Perform safety checks before processing a URL."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
await self._wait_for_rate_limit()
|
||||
return True
|
||||
|
||||
def _safe_process_url_sync(self, url: str) -> bool:
|
||||
"""Synchronous version of safety checks."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
self._sync_wait_for_rate_limit()
|
||||
return True
|
||||
|
||||
|
||||
class SafeFireCrawlLoader(BaseLoader, RateLimitMixin, URLProcessingMixin):
|
||||
def __init__(
|
||||
self,
|
||||
web_paths,
|
||||
|
@ -208,43 +251,120 @@ class SafeFireCrawlLoader(BaseLoader):
|
|||
continue
|
||||
raise e
|
||||
|
||||
def _verify_ssl_cert(self, url: str) -> bool:
|
||||
return verify_ssl_cert(url)
|
||||
|
||||
async def _wait_for_rate_limit(self):
|
||||
"""Wait to respect the rate limit if specified."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
await asyncio.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
class SafeTavilyLoader(BaseLoader, RateLimitMixin, URLProcessingMixin):
|
||||
def __init__(
|
||||
self,
|
||||
web_paths: Union[str, List[str]],
|
||||
api_key: str,
|
||||
extract_depth: Literal["basic", "advanced"] = "basic",
|
||||
continue_on_failure: bool = True,
|
||||
requests_per_second: Optional[float] = None,
|
||||
verify_ssl: bool = True,
|
||||
trust_env: bool = False,
|
||||
proxy: Optional[Dict[str, str]] = None,
|
||||
):
|
||||
"""Initialize SafeTavilyLoader with rate limiting and SSL verification support.
|
||||
|
||||
def _sync_wait_for_rate_limit(self):
|
||||
"""Synchronous version of rate limit wait."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
time.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
Args:
|
||||
web_paths: List of URLs/paths to process.
|
||||
api_key: The Tavily API key.
|
||||
extract_depth: Depth of extraction ("basic" or "advanced").
|
||||
continue_on_failure: Whether to continue if extraction of a URL fails.
|
||||
requests_per_second: Number of requests per second to limit to.
|
||||
verify_ssl: If True, verify SSL certificates.
|
||||
trust_env: If True, use proxy settings from environment variables.
|
||||
proxy: Optional proxy configuration.
|
||||
"""
|
||||
# Initialize proxy configuration if using environment variables
|
||||
proxy_server = proxy.get("server") if proxy else None
|
||||
if trust_env and not proxy_server:
|
||||
env_proxies = urllib.request.getproxies()
|
||||
env_proxy_server = env_proxies.get("https") or env_proxies.get("http")
|
||||
if env_proxy_server:
|
||||
if proxy:
|
||||
proxy["server"] = env_proxy_server
|
||||
else:
|
||||
proxy = {"server": env_proxy_server}
|
||||
|
||||
# Store parameters for creating TavilyLoader instances
|
||||
self.web_paths = web_paths if isinstance(web_paths, list) else [web_paths]
|
||||
self.api_key = api_key
|
||||
self.extract_depth = extract_depth
|
||||
self.continue_on_failure = continue_on_failure
|
||||
self.verify_ssl = verify_ssl
|
||||
self.trust_env = trust_env
|
||||
self.proxy = proxy
|
||||
|
||||
# Add rate limiting
|
||||
self.requests_per_second = requests_per_second
|
||||
self.last_request_time = None
|
||||
|
||||
async def _safe_process_url(self, url: str) -> bool:
|
||||
"""Perform safety checks before processing a URL."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
await self._wait_for_rate_limit()
|
||||
return True
|
||||
|
||||
def _safe_process_url_sync(self, url: str) -> bool:
|
||||
"""Synchronous version of safety checks."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
self._sync_wait_for_rate_limit()
|
||||
return True
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""Load documents with rate limiting support, delegating to TavilyLoader."""
|
||||
valid_urls = []
|
||||
for url in self.web_paths:
|
||||
try:
|
||||
self._safe_process_url_sync(url)
|
||||
valid_urls.append(url)
|
||||
except Exception as e:
|
||||
log.warning(f"SSL verification failed for {url}: {str(e)}")
|
||||
if not self.continue_on_failure:
|
||||
raise e
|
||||
if not valid_urls:
|
||||
if self.continue_on_failure:
|
||||
log.warning("No valid URLs to process after SSL verification")
|
||||
return
|
||||
raise ValueError("No valid URLs to process after SSL verification")
|
||||
try:
|
||||
loader = TavilyLoader(
|
||||
urls=valid_urls,
|
||||
api_key=self.api_key,
|
||||
extract_depth=self.extract_depth,
|
||||
continue_on_failure=self.continue_on_failure,
|
||||
)
|
||||
yield from loader.lazy_load()
|
||||
except Exception as e:
|
||||
if self.continue_on_failure:
|
||||
log.exception(e, "Error extracting content from URLs")
|
||||
else:
|
||||
raise e
|
||||
|
||||
async def alazy_load(self) -> AsyncIterator[Document]:
|
||||
"""Async version with rate limiting and SSL verification."""
|
||||
valid_urls = []
|
||||
for url in self.web_paths:
|
||||
try:
|
||||
await self._safe_process_url(url)
|
||||
valid_urls.append(url)
|
||||
except Exception as e:
|
||||
log.warning(f"SSL verification failed for {url}: {str(e)}")
|
||||
if not self.continue_on_failure:
|
||||
raise e
|
||||
|
||||
if not valid_urls:
|
||||
if self.continue_on_failure:
|
||||
log.warning("No valid URLs to process after SSL verification")
|
||||
return
|
||||
raise ValueError("No valid URLs to process after SSL verification")
|
||||
|
||||
try:
|
||||
loader = TavilyLoader(
|
||||
urls=valid_urls,
|
||||
api_key=self.api_key,
|
||||
extract_depth=self.extract_depth,
|
||||
continue_on_failure=self.continue_on_failure,
|
||||
)
|
||||
async for document in loader.alazy_load():
|
||||
yield document
|
||||
except Exception as e:
|
||||
if self.continue_on_failure:
|
||||
log.exception(e, "Error loading URLs")
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
||||
class SafePlaywrightURLLoader(PlaywrightURLLoader):
|
||||
class SafePlaywrightURLLoader(PlaywrightURLLoader, RateLimitMixin, URLProcessingMixin):
|
||||
"""Load HTML pages safely with Playwright, supporting SSL verification, rate limiting, and remote browser connection.
|
||||
|
||||
Attributes:
|
||||
|
@ -356,40 +476,6 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
|
|||
raise e
|
||||
await browser.close()
|
||||
|
||||
def _verify_ssl_cert(self, url: str) -> bool:
|
||||
return verify_ssl_cert(url)
|
||||
|
||||
async def _wait_for_rate_limit(self):
|
||||
"""Wait to respect the rate limit if specified."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
await asyncio.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
|
||||
def _sync_wait_for_rate_limit(self):
|
||||
"""Synchronous version of rate limit wait."""
|
||||
if self.requests_per_second and self.last_request_time:
|
||||
min_interval = timedelta(seconds=1.0 / self.requests_per_second)
|
||||
time_since_last = datetime.now() - self.last_request_time
|
||||
if time_since_last < min_interval:
|
||||
time.sleep((min_interval - time_since_last).total_seconds())
|
||||
self.last_request_time = datetime.now()
|
||||
|
||||
async def _safe_process_url(self, url: str) -> bool:
|
||||
"""Perform safety checks before processing a URL."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
await self._wait_for_rate_limit()
|
||||
return True
|
||||
|
||||
def _safe_process_url_sync(self, url: str) -> bool:
|
||||
"""Synchronous version of safety checks."""
|
||||
if self.verify_ssl and not self._verify_ssl_cert(url):
|
||||
raise ValueError(f"SSL certificate verification failed for {url}")
|
||||
self._sync_wait_for_rate_limit()
|
||||
return True
|
||||
|
||||
|
||||
class SafeWebBaseLoader(WebBaseLoader):
|
||||
|
@ -499,6 +585,7 @@ RAG_WEB_LOADER_ENGINES = defaultdict(lambda: SafeWebBaseLoader)
|
|||
RAG_WEB_LOADER_ENGINES["playwright"] = SafePlaywrightURLLoader
|
||||
RAG_WEB_LOADER_ENGINES["safe_web"] = SafeWebBaseLoader
|
||||
RAG_WEB_LOADER_ENGINES["firecrawl"] = SafeFireCrawlLoader
|
||||
RAG_WEB_LOADER_ENGINES["tavily"] = SafeTavilyLoader
|
||||
|
||||
|
||||
def get_web_loader(
|
||||
|
@ -525,6 +612,10 @@ def get_web_loader(
|
|||
web_loader_args["api_key"] = FIRECRAWL_API_KEY.value
|
||||
web_loader_args["api_url"] = FIRECRAWL_API_BASE_URL.value
|
||||
|
||||
if RAG_WEB_LOADER_ENGINE.value == "tavily":
|
||||
web_loader_args["api_key"] = TAVILY_API_KEY.value
|
||||
web_loader_args["extract_depth"] = TAVILY_EXTRACT_DEPTH.value
|
||||
|
||||
# Create the appropriate WebLoader based on the configuration
|
||||
WebLoaderClass = RAG_WEB_LOADER_ENGINES[RAG_WEB_LOADER_ENGINE.value]
|
||||
web_loader = WebLoaderClass(**web_loader_args)
|
||||
|
|
|
@ -210,7 +210,7 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
|
|||
LDAP_APP_DN,
|
||||
LDAP_APP_PASSWORD,
|
||||
auto_bind="NONE",
|
||||
authentication="SIMPLE",
|
||||
authentication="SIMPLE" if LDAP_APP_DN else "ANONYMOUS",
|
||||
)
|
||||
if not connection_app.bind():
|
||||
raise HTTPException(400, detail="Application account bind failed")
|
||||
|
|
|
@ -36,6 +36,9 @@ from open_webui.utils.payload import (
|
|||
apply_model_params_to_body_openai,
|
||||
apply_model_system_prompt_to_body,
|
||||
)
|
||||
from open_webui.utils.misc import (
|
||||
convert_logit_bias_input_to_json,
|
||||
)
|
||||
|
||||
from open_webui.utils.auth import get_admin_user, get_verified_user
|
||||
from open_webui.utils.access_control import has_access
|
||||
|
@ -396,6 +399,7 @@ async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
|
|||
|
||||
for idx, models in enumerate(model_lists):
|
||||
if models is not None and "error" not in models:
|
||||
|
||||
merged_list.extend(
|
||||
[
|
||||
{
|
||||
|
@ -406,18 +410,21 @@ async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
|
|||
"urlIdx": idx,
|
||||
}
|
||||
for model in models
|
||||
if "api.openai.com"
|
||||
not in request.app.state.config.OPENAI_API_BASE_URLS[idx]
|
||||
or not any(
|
||||
name in model["id"]
|
||||
for name in [
|
||||
"babbage",
|
||||
"dall-e",
|
||||
"davinci",
|
||||
"embedding",
|
||||
"tts",
|
||||
"whisper",
|
||||
]
|
||||
if (model.get("id") or model.get("name"))
|
||||
and (
|
||||
"api.openai.com"
|
||||
not in request.app.state.config.OPENAI_API_BASE_URLS[idx]
|
||||
or not any(
|
||||
name in model["id"]
|
||||
for name in [
|
||||
"babbage",
|
||||
"dall-e",
|
||||
"davinci",
|
||||
"embedding",
|
||||
"tts",
|
||||
"whisper",
|
||||
]
|
||||
)
|
||||
)
|
||||
]
|
||||
)
|
||||
|
@ -666,6 +673,11 @@ async def generate_chat_completion(
|
|||
del payload["max_tokens"]
|
||||
|
||||
# Convert the modified body back to JSON
|
||||
if "logit_bias" in payload:
|
||||
payload["logit_bias"] = json.loads(
|
||||
convert_logit_bias_input_to_json(payload["logit_bias"])
|
||||
)
|
||||
|
||||
payload = json.dumps(payload)
|
||||
|
||||
r = None
|
||||
|
|
|
@ -189,17 +189,15 @@ async def chat_completion_tools_handler(
|
|||
tool_function_params = tool_call.get("parameters", {})
|
||||
|
||||
try:
|
||||
required_params = (
|
||||
tools[tool_function_name]
|
||||
.get("spec", {})
|
||||
.get("parameters", {})
|
||||
.get("required", [])
|
||||
spec = tools[tool_function_name].get("spec", {})
|
||||
allowed_params = (
|
||||
spec.get("parameters", {}).get("properties", {}).keys()
|
||||
)
|
||||
tool_function = tools[tool_function_name]["callable"]
|
||||
tool_function_params = {
|
||||
k: v
|
||||
for k, v in tool_function_params.items()
|
||||
if k in required_params
|
||||
if k in allowed_params
|
||||
}
|
||||
tool_output = await tool_function(**tool_function_params)
|
||||
|
||||
|
@ -1765,14 +1763,16 @@ async def process_chat_response(
|
|||
spec = tool.get("spec", {})
|
||||
|
||||
try:
|
||||
required_params = spec.get("parameters", {}).get(
|
||||
"required", []
|
||||
allowed_params = (
|
||||
spec.get("parameters", {})
|
||||
.get("properties", {})
|
||||
.keys()
|
||||
)
|
||||
tool_function = tool["callable"]
|
||||
tool_function_params = {
|
||||
k: v
|
||||
for k, v in tool_function_params.items()
|
||||
if k in required_params
|
||||
if k in allowed_params
|
||||
}
|
||||
tool_result = await tool_function(
|
||||
**tool_function_params
|
||||
|
|
|
@ -179,7 +179,7 @@
|
|||
</div>
|
||||
</div>
|
||||
|
||||
<Tooltip content="Verify Connection" className="self-end -mb-1">
|
||||
<Tooltip content={$i18n.t('Verify Connection')} className="self-end -mb-1">
|
||||
<button
|
||||
class="self-center p-1 bg-transparent hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
|
||||
on:click={() => {
|
||||
|
|
|
@ -406,8 +406,12 @@
|
|||
<div class="flex items-center relative">
|
||||
<Tooltip
|
||||
content={BYPASS_EMBEDDING_AND_RETRIEVAL
|
||||
? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
|
||||
? $i18n.t(
|
||||
'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
)
|
||||
: $i18n.t(
|
||||
'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
|
||||
)}
|
||||
>
|
||||
<Switch bind:state={BYPASS_EMBEDDING_AND_RETRIEVAL} />
|
||||
</Tooltip>
|
||||
|
@ -644,8 +648,12 @@
|
|||
<div class="flex items-center relative">
|
||||
<Tooltip
|
||||
content={RAG_FULL_CONTEXT
|
||||
? 'Inject entire contents as context for comprehensive processing, this is recommended for complex queries.'
|
||||
: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
|
||||
? $i18n.t(
|
||||
'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
)
|
||||
: $i18n.t(
|
||||
'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
|
||||
)}
|
||||
>
|
||||
<Switch bind:state={RAG_FULL_CONTEXT} />
|
||||
</Tooltip>
|
||||
|
|
|
@ -191,11 +191,15 @@
|
|||
}
|
||||
|
||||
if (config.comfyui.COMFYUI_WORKFLOW) {
|
||||
config.comfyui.COMFYUI_WORKFLOW = JSON.stringify(
|
||||
JSON.parse(config.comfyui.COMFYUI_WORKFLOW),
|
||||
null,
|
||||
2
|
||||
);
|
||||
try {
|
||||
config.comfyui.COMFYUI_WORKFLOW = JSON.stringify(
|
||||
JSON.parse(config.comfyui.COMFYUI_WORKFLOW),
|
||||
null,
|
||||
2
|
||||
);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
}
|
||||
|
||||
requiredWorkflowNodes = requiredWorkflowNodes.map((node) => {
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
if (modelListElement) {
|
||||
sortable = Sortable.create(modelListElement, {
|
||||
animation: 150,
|
||||
handle: '.item-handle',
|
||||
onUpdate: async (event) => {
|
||||
positionChangeHandler();
|
||||
}
|
||||
|
@ -47,7 +48,7 @@
|
|||
<div class=" flex gap-2 w-full justify-between items-center" id="model-item-{modelId}">
|
||||
<Tooltip content={modelId} placement="top-start">
|
||||
<div class="flex items-center gap-1">
|
||||
<EllipsisVertical className="size-4 cursor-move" />
|
||||
<EllipsisVertical className="size-4 cursor-move item-handle" />
|
||||
|
||||
<div class=" text-sm flex-1 py-1 rounded-lg">
|
||||
{#if $models.find((model) => model.id === modelId)}
|
||||
|
|
|
@ -462,8 +462,12 @@
|
|||
<div class="flex items-center relative">
|
||||
<Tooltip
|
||||
content={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
|
||||
? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
|
||||
? $i18n.t(
|
||||
'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
)
|
||||
: $i18n.t(
|
||||
'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
|
||||
)}
|
||||
>
|
||||
<Switch bind:state={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL} />
|
||||
</Tooltip>
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
import Users from './Users.svelte';
|
||||
import UserPlusSolid from '$lib/components/icons/UserPlusSolid.svelte';
|
||||
import WrenchSolid from '$lib/components/icons/WrenchSolid.svelte';
|
||||
import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
|
||||
|
||||
export let onSubmit: Function = () => {};
|
||||
export let onDelete: Function = () => {};
|
||||
|
@ -25,6 +26,7 @@
|
|||
|
||||
let selectedTab = 'general';
|
||||
let loading = false;
|
||||
let showDeleteConfirmDialog = false;
|
||||
|
||||
export let name = '';
|
||||
export let description = '';
|
||||
|
@ -88,6 +90,14 @@
|
|||
});
|
||||
</script>
|
||||
|
||||
<ConfirmDialog
|
||||
bind:show={showDeleteConfirmDialog}
|
||||
on:confirm={() => {
|
||||
onDelete();
|
||||
show = false;
|
||||
}}
|
||||
/>
|
||||
|
||||
<Modal size="md" bind:show>
|
||||
<div>
|
||||
<div class=" flex justify-between dark:text-gray-100 px-5 pt-4 mb-1.5">
|
||||
|
@ -263,14 +273,13 @@
|
|||
{/if}
|
||||
</div> -->
|
||||
|
||||
<div class="flex justify-end pt-3 text-sm font-medium gap-1.5">
|
||||
<div class="flex justify-between pt-3 text-sm font-medium gap-1.5">
|
||||
{#if edit}
|
||||
<button
|
||||
class="px-3.5 py-1.5 text-sm font-medium dark:bg-black dark:hover:bg-gray-900 dark:text-white bg-white text-black hover:bg-gray-100 transition rounded-full flex flex-row space-x-1 items-center"
|
||||
type="button"
|
||||
on:click={() => {
|
||||
onDelete();
|
||||
show = false;
|
||||
showDeleteConfirmDialog = true;
|
||||
}}
|
||||
>
|
||||
{$i18n.t('Delete')}
|
||||
|
|
|
@ -73,10 +73,13 @@
|
|||
<div class="text-2xl font-medium capitalize">{channel.name}</div>
|
||||
|
||||
<div class=" text-gray-500">
|
||||
This channel was created on {dayjs(channel.created_at / 1000000).format(
|
||||
'MMMM D, YYYY'
|
||||
)}. This is the very beginning of the {channel.name}
|
||||
channel.
|
||||
{$i18n.t(
|
||||
'This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.',
|
||||
{
|
||||
createdAt: dayjs(channel.created_at / 1000000).format('MMMM D, YYYY'),
|
||||
channelName: channel.name
|
||||
}
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{:else}
|
||||
|
|
|
@ -212,7 +212,14 @@
|
|||
const _chatId = JSON.parse(JSON.stringify($chatId));
|
||||
let _messageId = JSON.parse(JSON.stringify(message.id));
|
||||
|
||||
let messageChildrenIds = history.messages[_messageId].childrenIds;
|
||||
let messageChildrenIds = [];
|
||||
if (_messageId === null) {
|
||||
messageChildrenIds = Object.keys(history.messages).filter(
|
||||
(id) => history.messages[id].parentId === null
|
||||
);
|
||||
} else {
|
||||
messageChildrenIds = history.messages[_messageId].childrenIds;
|
||||
}
|
||||
|
||||
while (messageChildrenIds.length !== 0) {
|
||||
_messageId = messageChildrenIds.at(-1);
|
||||
|
|
|
@ -124,7 +124,7 @@
|
|||
</div>
|
||||
{:else}
|
||||
<Collapsible
|
||||
id="collapsible-sources"
|
||||
id={`collapsible-${id}`}
|
||||
bind:open={isCollapsibleOpen}
|
||||
className="w-full max-w-full "
|
||||
buttonClassName="w-fit max-w-full"
|
||||
|
|
|
@ -441,7 +441,9 @@
|
|||
|
||||
{#if ($config?.features?.enable_code_execution ?? true) && (lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code)))}
|
||||
{#if executing}
|
||||
<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
|
||||
<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">
|
||||
{$i18n.t('Running')}
|
||||
</div>
|
||||
{:else if run}
|
||||
<button
|
||||
class="flex gap-1 items-center run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
|
||||
|
|
|
@ -748,7 +748,9 @@
|
|||
onSourceClick={async (id, idx) => {
|
||||
console.log(id, idx);
|
||||
let sourceButton = document.getElementById(`source-${message.id}-${idx}`);
|
||||
const sourcesCollapsible = document.getElementById(`collapsible-sources`);
|
||||
const sourcesCollapsible = document.getElementById(
|
||||
`collapsible-${message.id}`
|
||||
);
|
||||
|
||||
if (sourceButton) {
|
||||
sourceButton.click();
|
||||
|
|
|
@ -347,7 +347,7 @@
|
|||
</button>
|
||||
</Tooltip>
|
||||
|
||||
{#if !isFirstMessage && !readOnly}
|
||||
{#if !readOnly && siblings.length > 1}
|
||||
<Tooltip content={$i18n.t('Delete')} placement="bottom">
|
||||
<button
|
||||
class="invisible group-hover:visible p-1 rounded-sm dark:hover:text-white hover:text-black transition"
|
||||
|
|
|
@ -342,6 +342,28 @@
|
|||
{$i18n.t('All')}
|
||||
</button>
|
||||
|
||||
<button
|
||||
class="min-w-fit outline-none p-1.5 {selectedTag === ''
|
||||
? ''
|
||||
: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
|
||||
on:click={() => {
|
||||
selectedTag = '';
|
||||
}}
|
||||
>
|
||||
{$i18n.t('Ollama')}
|
||||
</button>
|
||||
|
||||
<button
|
||||
class="min-w-fit outline-none p-1.5 {selectedTag === ''
|
||||
? ''
|
||||
: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
|
||||
on:click={() => {
|
||||
selectedTag = '';
|
||||
}}
|
||||
>
|
||||
{$i18n.t('OpenAI')}
|
||||
</button>
|
||||
|
||||
{#each tags as tag}
|
||||
<button
|
||||
class="min-w-fit outline-none p-1.5 {selectedTag === tag
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
|
||||
export let state = 'unchecked';
|
||||
export let indeterminate = false;
|
||||
export let disabled = false;
|
||||
|
||||
let _state = 'unchecked';
|
||||
|
||||
|
@ -14,8 +15,10 @@
|
|||
class=" outline -outline-offset-1 outline-[1.5px] outline-gray-200 dark:outline-gray-600 {state !==
|
||||
'unchecked'
|
||||
? 'bg-black outline-black '
|
||||
: 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative"
|
||||
: 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative {disabled ? 'opacity-50 cursor-not-allowed' : ''}"
|
||||
on:click={() => {
|
||||
if (disabled) return;
|
||||
|
||||
if (_state === 'unchecked') {
|
||||
_state = 'checked';
|
||||
dispatch('change', _state);
|
||||
|
@ -30,6 +33,7 @@
|
|||
}
|
||||
}}
|
||||
type="button"
|
||||
{disabled}
|
||||
>
|
||||
<div class="top-0 left-0 absolute w-full flex justify-center">
|
||||
{#if _state === 'checked'}
|
||||
|
|
|
@ -87,8 +87,12 @@
|
|||
<div>
|
||||
<Tooltip
|
||||
content={enableFullContent
|
||||
? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
|
||||
? $i18n.t(
|
||||
'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
|
||||
)
|
||||
: $i18n.t(
|
||||
'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
|
||||
)}
|
||||
>
|
||||
<div class="flex items-center gap-1.5 text-xs">
|
||||
{#if enableFullContent}
|
||||
|
|
|
@ -39,10 +39,13 @@
|
|||
<div class=" flex items-center gap-2 mr-3">
|
||||
<div class="self-center flex items-center">
|
||||
<Checkbox
|
||||
state={_filters[filter].selected ? 'checked' : 'unchecked'}
|
||||
state={_filters[filter].is_global ? 'checked' : (_filters[filter].selected ? 'checked' : 'unchecked')}
|
||||
disabled={_filters[filter].is_global}
|
||||
on:change={(e) => {
|
||||
_filters[filter].selected = e.detail === 'checked';
|
||||
selectedFilterIds = Object.keys(_filters).filter((t) => _filters[t].selected);
|
||||
if (!_filters[filter].is_global) {
|
||||
_filters[filter].selected = e.detail === 'checked';
|
||||
selectedFilterIds = Object.keys(_filters).filter((t) => _filters[t].selected);
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
|
|
|
@ -113,8 +113,8 @@
|
|||
}
|
||||
}}
|
||||
>
|
||||
<option class=" text-gray-700" value="private" selected>Private</option>
|
||||
<option class=" text-gray-700" value="public" selected>Public</option>
|
||||
<option class=" text-gray-700" value="private" selected>{$i18n.t('Private')}</option>
|
||||
<option class=" text-gray-700" value="public" selected>{$i18n.t('Public')}</option>
|
||||
</select>
|
||||
|
||||
<div class=" text-xs text-gray-400 font-medium">
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "الإفتراضي Prompt الاقتراحات",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "الإفتراضي صلاحيات المستخدم",
|
||||
"Delete": "حذف",
|
||||
"Delete a model": "حذف الموديل",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "معلومات",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "إدخال الأوامر",
|
||||
"Install from Github URL": "التثبيت من عنوان URL لجيثب",
|
||||
"Instant Auto-Send After Voice Transcription": "",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "أخر 30 يوم",
|
||||
"Previous 7 days": "أخر 7 أيام",
|
||||
"Private": "",
|
||||
"Profile Image": "صورة الملف الشخصي",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "موجه (على سبيل المثال: أخبرني بحقيقة ممتعة عن الإمبراطورية الرومانية)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "مطالبات",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ",
|
||||
"Pull a model from Ollama.com": "Ollama.com سحب الموديل من ",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "الثيم",
|
||||
"Thinking...": "",
|
||||
"This action cannot be undone. Do you wish to continue?": "",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "",
|
||||
"variable": "المتغير",
|
||||
"variable to have them replaced with clipboard content.": "متغير لاستبدالها بمحتوى الحافظة.",
|
||||
"Verify Connection": "",
|
||||
"Version": "إصدار",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Промпт Предложения по подразбиране",
|
||||
"Default to 389 or 636 if TLS is enabled": "По подразбиране 389 или 636, ако TLS е активиран",
|
||||
"Default to ALL": "По подразбиране за ВСИЧКИ",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Роля на потребителя по подразбиране",
|
||||
"Delete": "Изтриване",
|
||||
"Delete a model": "Изтриване на модел",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Информация",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Въведете команди",
|
||||
"Install from Github URL": "Инсталиране от URL адреса на Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Незабавно автоматично изпращане след гласова транскрипция",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "Наказание за присъствие",
|
||||
"Previous 30 days": "Предишните 30 дни",
|
||||
"Previous 7 days": "Предишните 7 дни",
|
||||
"Private": "",
|
||||
"Profile Image": "Профилна снимка",
|
||||
"Prompt": "Промпт",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (напр. Кажи ми забавен факт за Римската империя)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "Промптът е актуализиран успешно",
|
||||
"Prompts": "Промптове",
|
||||
"Prompts Access": "Достъп до промптове",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Извади \"{{searchValue}}\" от Ollama.com",
|
||||
"Pull a model from Ollama.com": "Издърпайте модел от Ollama.com",
|
||||
"Query Generation Prompt": "Промпт за генериране на запитвания",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Тема",
|
||||
"Thinking...": "Мисля...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Това действие не може да бъде отменено. Желаете ли да продължите?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Клапаните са актуализирани успешно",
|
||||
"variable": "променлива",
|
||||
"variable to have them replaced with clipboard content.": "променлива, за да бъдат заменени със съдържанието от клипборда.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Версия",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Версия {{selectedVersion}} от {{totalVersions}}",
|
||||
"View Replies": "Преглед на отговорите",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "ডিফল্ট প্রম্পট সাজেশন",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "ইউজারের ডিফল্ট পদবি",
|
||||
"Delete": "মুছে ফেলুন",
|
||||
"Delete a model": "একটি মডেল মুছে ফেলুন",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "তথ্য",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "ইনপুট কমান্ডস",
|
||||
"Install from Github URL": "Github URL থেকে ইনস্টল করুন",
|
||||
"Instant Auto-Send After Voice Transcription": "",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "পূর্ব ৩০ দিন",
|
||||
"Previous 7 days": "পূর্ব ৭ দিন",
|
||||
"Private": "",
|
||||
"Profile Image": "প্রোফাইল ইমেজ",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "প্রম্প্ট (উদাহরণস্বরূপ, আমি রোমান ইমপার্টের সম্পর্কে একটি উপস্থিতি জানতে বল)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "প্রম্পটসমূহ",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com থেকে \"{{searchValue}}\" টানুন",
|
||||
"Pull a model from Ollama.com": "Ollama.com থেকে একটি টেনে আনুন আনুন",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "থিম",
|
||||
"Thinking...": "",
|
||||
"This action cannot be undone. Do you wish to continue?": "",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "",
|
||||
"variable": "ভেরিয়েবল",
|
||||
"variable to have them replaced with clipboard content.": "ক্লিপবোর্ডের কন্টেন্ট দিয়ে যেই ভেরিয়েবল রিপ্লেস করা যাবে।",
|
||||
"Verify Connection": "",
|
||||
"Version": "ভার্সন",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Suggeriments d'indicació per defecte",
 "Default to 389 or 636 if TLS is enabled": "Per defecte 389 o 636 si TLS està habilitat",
 "Default to ALL": "Per defecte TOTS",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Rol d'usuari per defecte",
 "Delete": "Eliminar",
 "Delete a model": "Eliminar un model",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.",
 "Info": "Informació",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Entra comandes",
 "Install from Github URL": "Instal·lar des de l'URL de Github",
 "Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu",
@@ -806,6 +808,7 @@
 "Presence Penalty": "Penalització de presència",
 "Previous 30 days": "30 dies anteriors",
 "Previous 7 days": "7 dies anteriors",
+"Private": "",
 "Profile Image": "Imatge de perfil",
 "Prompt": "Indicació",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Indicació (p.ex. Digues-me quelcom divertit sobre l'Imperi Romà)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Indicació actualitzada correctament",
 "Prompts": "Indicacions",
 "Prompts Access": "Accés a les indicacions",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com",
 "Pull a model from Ollama.com": "Obtenir un model d'Ollama.com",
 "Query Generation Prompt": "Indicació per a generació de consulta",
@@ -1009,6 +1013,7 @@
 "Theme": "Tema",
 "Thinking...": "Pensant...",
 "This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Valves actualitat correctament",
 "variable": "variable",
 "variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.",
+"Verify Connection": "",
 "Version": "Versió",
 "Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}",
 "View Replies": "Veure les respostes",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Default nga prompt nga mga sugyot",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Default nga Papel sa Gumagamit",
 "Delete": "",
 "Delete a model": "Pagtangtang sa usa ka template",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Pagsulod sa input commands",
 "Install from Github URL": "",
 "Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "",
 "Previous 7 days": "",
+"Private": "",
 "Profile Image": "",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "Mga aghat",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "",
 "Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "Tema",
 "Thinking...": "",
 "This action cannot be undone. Do you wish to continue?": "",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "",
 "variable": "variable",
 "variable to have them replaced with clipboard content.": "variable aron pulihan kini sa mga sulud sa clipboard.",
+"Verify Connection": "",
 "Version": "Bersyon",
 "Version {{selectedVersion}} of {{totalVersions}}": "",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Výchozí návrhy promptů",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Výchozí uživatelská role",
 "Delete": "Smazat",
 "Delete a model": "Odstranit model.",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Info",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Vstupní příkazy",
 "Install from Github URL": "Instalace z URL adresy Githubu",
 "Instant Auto-Send After Voice Transcription": "Okamžité automatické odeslání po přepisu hlasu",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Předchozích 30 dnů",
 "Previous 7 days": "Předchozích 7 dní",
+"Private": "",
 "Profile Image": "Profilový obrázek",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (např. Řekni mi zábavný fakt o Římské říši)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "Prompty",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com",
 "Pull a model from Ollama.com": "Stáhněte model z Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "Téma",
 "Thinking...": "Přemýšlím...",
 "This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Ventily byly úspěšně aktualizovány.",
 "variable": "proměnná",
 "variable to have them replaced with clipboard content.": "proměnnou, aby byl jejich obsah nahrazen obsahem schránky.",
+"Verify Connection": "",
 "Version": "Verze",
 "Version {{selectedVersion}} of {{totalVersions}}": "Verze {{selectedVersion}} z {{totalVersions}}",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Standardforslag til prompt",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Brugers rolle som standard",
 "Delete": "Slet",
 "Delete a model": "Slet en model",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Info",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Inputkommandoer",
 "Install from Github URL": "Installer fra Github URL",
 "Instant Auto-Send After Voice Transcription": "Øjeblikkelig automatisk afsendelse efter stemmetransskription",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Seneste 30 dage",
 "Previous 7 days": "Seneste 7 dage",
+"Private": "",
 "Profile Image": "Profilbillede",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (f.eks. Fortæl mig en sjov kendsgerning om Romerriget)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "Prompts",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com",
 "Pull a model from Ollama.com": "Hent en model fra Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "Tema",
 "Thinking...": "Tænker...",
 "This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Ventiler opdateret.",
 "variable": "variabel",
 "variable to have them replaced with clipboard content.": "variabel for at få dem erstattet med indholdet af udklipsholderen.",
+"Verify Connection": "",
 "Version": "Version",
 "Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} af {{totalVersions}}",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Prompt-Vorschläge",
 "Default to 389 or 636 if TLS is enabled": "Standardmäßig auf 389 oder 636 setzen, wenn TLS aktiviert ist",
 "Default to ALL": "Standardmäßig auf ALLE setzen",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Standardbenutzerrolle",
 "Delete": "Löschen",
 "Delete a model": "Ein Modell löschen",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Info",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Eingabebefehle",
 "Install from Github URL": "Installiere von der Github-URL",
 "Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Vorherige 30 Tage",
 "Previous 7 days": "Vorherige 7 Tage",
+"Private": "",
 "Profile Image": "Profilbild",
 "Prompt": "Prompt",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (z. B. \"Erzähle mir eine interessante Tatsache über das Römische Reich\")",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Prompt erfolgreich aktualisiert",
 "Prompts": "Prompts",
 "Prompts Access": "Prompt-Zugriff",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen",
 "Pull a model from Ollama.com": "Modell von Ollama.com beziehen",
 "Query Generation Prompt": "Abfragegenerierungsprompt",
@@ -1009,6 +1013,7 @@
 "Theme": "Design",
 "Thinking...": "Denke nach...",
 "This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Valves erfolgreich aktualisiert",
 "variable": "Variable",
 "variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.",
+"Verify Connection": "",
 "Version": "Version",
 "Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} von {{totalVersions}}",
 "View Replies": "Antworten anzeigen",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Default Prompt Suggestions",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Default User Role",
 "Delete": "",
 "Delete a model": "Delete a model",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Input commands",
 "Install from Github URL": "",
 "Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "",
 "Previous 7 days": "",
+"Private": "",
 "Profile Image": "",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "Promptos",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "",
 "Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "Theme much theme",
 "Thinking...": "",
 "This action cannot be undone. Do you wish to continue?": "",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "",
 "variable": "variable very variable",
 "variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
+"Verify Connection": "",
 "Version": "Version much version",
 "Version {{selectedVersion}} of {{totalVersions}}": "",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Προεπιλεγμένες Προτάσεις Προτροπής",
 "Default to 389 or 636 if TLS is enabled": "Προεπιλογή στο 389 ή 636 εάν είναι ενεργοποιημένο το TLS",
 "Default to ALL": "Προεπιλογή σε ΟΛΑ",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Προεπιλεγμένος Ρόλος Χρήστη",
 "Delete": "Διαγραφή",
 "Delete a model": "Διαγραφή ενός μοντέλου",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Πληροφορίες",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Εισαγωγή εντολών",
 "Install from Github URL": "Εγκατάσταση από URL Github",
 "Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Προηγούμενες 30 ημέρες",
 "Previous 7 days": "Προηγούμενες 7 ημέρες",
+"Private": "",
 "Profile Image": "Εικόνα Προφίλ",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Προτροπή (π.χ. Πες μου ένα διασκεδαστικό γεγονός για την Ρωμαϊκή Αυτοκρατορία)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 "Prompts": "Προτροπές",
 "Prompts Access": "Πρόσβαση Προτροπών",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 "Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 "Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
@@ -1009,6 +1013,7 @@
 "Theme": "Θέμα",
 "Thinking...": "Σκέφτομαι...",
 "This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Οι βαλβίδες ενημερώθηκαν με επιτυχία",
 "variable": "μεταβλητή",
 "variable to have them replaced with clipboard content.": "μεταβλητή να αντικατασταθούν με το περιεχόμενο του πρόχειρου.",
+"Verify Connection": "",
 "Version": "Έκδοση",
 "Version {{selectedVersion}} of {{totalVersions}}": "Έκδοση {{selectedVersion}} από {{totalVersions}}",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "",
 "Delete": "",
 "Delete a model": "",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "",
 "Install from Github URL": "",
 "Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "",
 "Previous 7 days": "",
+"Private": "",
 "Profile Image": "",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "",
 "Pull a model from Ollama.com": "",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "",
 "Thinking...": "",
 "This action cannot be undone. Do you wish to continue?": "",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "",
 "variable": "",
 "variable to have them replaced with clipboard content.": "",
+"Verify Connection": "",
 "Version": "",
 "Version {{selectedVersion}} of {{totalVersions}}": "",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "",
 "Delete": "",
 "Delete a model": "",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "",
 "Install from Github URL": "",
 "Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "",
 "Previous 7 days": "",
+"Private": "",
 "Profile Image": "",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "",
 "Pull a model from Ollama.com": "",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "",
 "Thinking...": "",
 "This action cannot be undone. Do you wish to continue?": "",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "",
 "variable": "",
 "variable to have them replaced with clipboard content.": "",
+"Verify Connection": "",
 "Version": "",
 "Version {{selectedVersion}} of {{totalVersions}}": "",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Sugerencias de mensajes por defecto",
 "Default to 389 or 636 if TLS is enabled": "Predeterminado a 389 o 636 si TLS está habilitado",
 "Default to ALL": "Predeterminado a TODOS",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Rol por defecto para usuarios",
 "Delete": "Borrar",
 "Delete a model": "Borra un modelo",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Información",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Ingresar comandos",
 "Install from Github URL": "Instalar desde la URL de Github",
 "Instant Auto-Send After Voice Transcription": "Auto-Enviar Después de la Transcripción de Voz",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Últimos 30 días",
 "Previous 7 days": "Últimos 7 días",
+"Private": "",
 "Profile Image": "Imagen de perfil",
 "Prompt": "Prompt",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (por ejemplo, cuéntame una cosa divertida sobre el Imperio Romano)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Prompt actualizado exitosamente",
 "Prompts": "Prompts",
 "Prompts Access": "Acceso a Prompts",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Extraer \"{{searchValue}}\" de Ollama.com",
 "Pull a model from Ollama.com": "Obtener un modelo de Ollama.com",
 "Query Generation Prompt": "Prompt de generación de consulta",
@@ -1009,6 +1013,7 @@
 "Theme": "Tema",
 "Thinking...": "Pensando...",
 "This action cannot be undone. Do you wish to continue?": "Esta acción no se puede deshacer. ¿Desea continuar?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guarden de forma segura en su base de datos en el backend. ¡Gracias!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental que puede no funcionar como se esperaba y está sujeto a cambios en cualquier momento.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Valves actualizados con éxito",
 "variable": "variable",
 "variable to have them replaced with clipboard content.": "variable para reemplazarlos con el contenido del portapapeles.",
+"Verify Connection": "",
 "Version": "Versión",
 "Version {{selectedVersion}} of {{totalVersions}}": "Versión {{selectedVersion}} de {{totalVersions}}",
 "View Replies": "Ver respuestas",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Prompt Iradokizun Lehenetsiak",
 "Default to 389 or 636 if TLS is enabled": "Lehenetsi 389 edo 636 TLS gaituta badago",
 "Default to ALL": "Lehenetsi GUZTIAK",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Erabiltzaile Rol Lehenetsia",
 "Delete": "Ezabatu",
 "Delete a model": "Ezabatu eredu bat",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Sartu `--api` bandera stable-diffusion-webui exekutatzean",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Informazioa",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Sartu komandoak",
 "Install from Github URL": "Instalatu Github URLtik",
 "Instant Auto-Send After Voice Transcription": "Bidalketa Automatiko Berehalakoa Ahots Transkripzioaren Ondoren",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Aurreko 30 egunak",
 "Previous 7 days": "Aurreko 7 egunak",
+"Private": "",
 "Profile Image": "Profil irudia",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt-a (adib. Kontatu datu dibertigarri bat Erromatar Inperioari buruz)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Prompt-a ongi eguneratu da",
 "Prompts": "Prompt-ak",
 "Prompts Access": "Prompt sarbidea",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Ekarri \"{{searchValue}}\" Ollama.com-etik",
 "Pull a model from Ollama.com": "Ekarri modelo bat Ollama.com-etik",
 "Query Generation Prompt": "Kontsulta sortzeko prompt-a",
@@ -1009,6 +1013,7 @@
 "Theme": "Gaia",
 "Thinking...": "Pentsatzen...",
 "This action cannot be undone. Do you wish to continue?": "Ekintza hau ezin da desegin. Jarraitu nahi duzu?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. Eskerrik asko!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Balbulak ongi eguneratu dira",
 "variable": "aldagaia",
 "variable to have them replaced with clipboard content.": "aldagaia arbeleko edukiarekin ordezkatzeko.",
+"Verify Connection": "",
 "Version": "Bertsioa",
 "Version {{selectedVersion}} of {{totalVersions}}": "{{totalVersions}}-tik {{selectedVersion}}. bertsioa",
 "View Replies": "",

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "پیشنهادات پرامپت پیش فرض",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "نقش کاربر پیش فرض",
 "Delete": "حذف",
 "Delete a model": "حذف یک مدل",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "اطلاعات",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "ورودی دستورات",
 "Install from Github URL": "نصب از ادرس Github",
 "Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "30 روز قبل",
 "Previous 7 days": "7 روز قبل",
+"Private": "",
 "Profile Image": "تصویر پروفایل",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "پیشنهاد (برای مثال: به من بگوید چیزی که برای من یک کاربرد داره درباره ایران)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "پرامپت\u200cها",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "بازگرداندن \"{{searchValue}}\" از Ollama.com",
 "Pull a model from Ollama.com": "دریافت یک مدل از Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "پوسته",
 "Thinking...": "در حال فکر...",
 "This action cannot be undone. Do you wish to continue?": "این اقدام قابل بازگردانی نیست. برای ادامه اطمینان دارید؟",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "این تضمین می کند که مکالمات ارزشمند شما به طور ایمن در پایگاه داده بکند ذخیره می شود. تشکر!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "",
 "variable": "متغیر",
 "variable to have them replaced with clipboard content.": "متغیر برای جایگزینی آنها با محتوای بریده\u200cدان.",
+"Verify Connection": "",
 "Version": "نسخه",
 "Version {{selectedVersion}} of {{totalVersions}}": "نسخهٔ {{selectedVersion}} از {{totalVersions}}",
 "View Replies": "",

@@ -5,7 +5,7 @@
 "(e.g. `sh webui.sh --api`)": "(esim. `sh webui.sh --api`)",
 "(latest)": "(uusin)",
 "{{ models }}": "{{ mallit }}",
-"{{COUNT}} hidden lines": "",
+"{{COUNT}} hidden lines": "{{COUNT}} piilotettua riviä",
 "{{COUNT}} Replies": "{{COUNT}} vastausta",
 "{{user}}'s Chats": "{{user}}:n keskustelut",
 "{{webUIName}} Backend Required": "{{webUIName}}-backend vaaditaan",
@@ -52,7 +52,7 @@
 "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Ylläpitäjillä on pääsy kaikkiin työkaluihin koko ajan; käyttäjät tarvitsevat työkaluja mallille määritettynä työtilassa.",
 "Advanced Parameters": "Edistyneet parametrit",
 "Advanced Params": "Edistyneet parametrit",
-"All": "",
+"All": "Kaikki",
 "All Documents": "Kaikki asiakirjat",
 "All models deleted successfully": "Kaikki mallit poistettu onnistuneesti",
 "Allow Chat Controls": "Salli keskustelujen hallinta",
@@ -95,10 +95,10 @@
 "Are you sure?": "Oletko varma?",
 "Arena Models": "Arena-mallit",
 "Artifacts": "Artefaktit",
-"Ask": "",
+"Ask": "Kysy",
 "Ask a question": "Kysyä kysymys",
 "Assistant": "Avustaja",
-"Attach file from knowledge": "",
+"Attach file from knowledge": "Liitä tiedosto tietokannasta",
 "Attention to detail": "Huomio yksityiskohtiin",
 "Attribute for Mail": "",
 "Attribute for Username": "Käyttäjänimi-määritämä",
@@ -133,7 +133,7 @@
 "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "",
 "Brave Search API Key": "Brave Search API -avain",
 "By {{name}}": "Tekijä {{name}}",
-"Bypass Embedding and Retrieval": "",
+"Bypass Embedding and Retrieval": "Ohita upotus ja haku",
 "Bypass SSL verification for Websites": "Ohita SSL-varmennus verkkosivustoille",
 "Calendar": "Kalenteri",
 "Call": "Puhelu",
@@ -167,7 +167,7 @@
 "Ciphers": "Salausalgoritmi",
 "Citation": "Lähdeviite",
 "Clear memory": "Tyhjennä muisti",
-"Clear Memory": "",
+"Clear Memory": "Tyhjennä Muisti",
 "click here": "klikkaa tästä",
 "Click here for filter guides.": "Katso suodatinohjeita klikkaamalla tästä.",
 "Click here for help.": "Klikkaa tästä saadaksesi apua.",
@@ -186,15 +186,15 @@
 "Clone Chat": "Kloonaa keskustelu",
 "Clone of {{TITLE}}": "{{TITLE}} klooni",
 "Close": "Sulje",
-"Code execution": "Koodin suorittaminen",
-"Code Execution": "Koodin suorittaminen",
+"Code execution": "Koodin suoritus",
+"Code Execution": "Koodin Suoritus",
 "Code Execution Engine": "Koodin suoritusmoottori",
 "Code Execution Timeout": "Koodin suorittamisen aikakatkaisu",
 "Code formatted successfully": "Koodin muotoilu onnistui",
 "Code Interpreter": "Ohjelmatulkki",
 "Code Interpreter Engine": "Ohjelmatulkin moottori",
 "Code Interpreter Prompt Template": "Ohjelmatulkin kehotemalli",
-"Collapse": "",
+"Collapse": "Pienennä",
 "Collection": "Kokoelma",
 "Color": "Väri",
 "ComfyUI": "ComfyUI",
@@ -216,7 +216,7 @@
 "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 "Contact Admin for WebUI Access": "Ota yhteyttä ylläpitäjään WebUI-käyttöä varten",
 "Content": "Sisältö",
-"Content Extraction Engine": "",
+"Content Extraction Engine": "Sisällönpoimintamoottori",
 "Context Length": "Kontekstin pituus",
 "Continue Response": "Jatka vastausta",
 "Continue with {{provider}}": "Jatka palvelulla {{provider}}",
@@ -250,11 +250,11 @@
 "Created At": "Luotu",
 "Created by": "Luonut",
 "CSV Import": "CSV-tuonti",
-"Ctrl+Enter to Send": "",
+"Ctrl+Enter to Send": "Ctrl+Enter lähettääksesi",
 "Current Model": "Nykyinen malli",
 "Current Password": "Nykyinen salasana",
 "Custom": "Mukautettu",
-"Danger Zone": "",
+"Danger Zone": "Vaara-alue",
 "Dark": "Tumma",
 "Database": "Tietokanta",
 "December": "joulukuu",
@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Oletuskehotteiden ehdotukset",
 "Default to 389 or 636 if TLS is enabled": "Oletus 389 tai 636, jos TLS on käytössä",
 "Default to ALL": "Oletus KAIKKI",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Oletuskäyttäjärooli",
 "Delete": "Poista",
 "Delete a model": "Poista malli",
@@ -352,14 +353,14 @@
 "ElevenLabs": "ElevenLabs",
 "Email": "Sähköposti",
 "Embark on adventures": "Lähde seikkailuille",
-"Embedding": "",
+"Embedding": "Upotus",
 "Embedding Batch Size": "Upotuksen eräkoko",
 "Embedding Model": "Upotusmalli",
 "Embedding Model Engine": "Upotusmallin moottori",
 "Embedding model set to \"{{embedding_model}}\"": "\"{{embedding_model}}\" valittu upotusmalliksi",
 "Enable API Key": "Ota API -avain käyttöön",
 "Enable autocomplete generation for chat messages": "Ota automaattinen täydennys käyttöön keskusteluviesteissä",
-"Enable Code Execution": "",
+"Enable Code Execution": "Ota koodin suoritus käyttöön",
 "Enable Code Interpreter": "Ota ohjelmatulkki käyttöön",
 "Enable Community Sharing": "Ota yhteisön jakaminen käyttöön",
 "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ota Memory Locking (mlock) käyttöön estääksesi mallidatan vaihtamisen pois RAM-muistista. Tämä lukitsee mallin työsivut RAM-muistiin, varmistaen että niitä ei vaihdeta levylle. Tämä voi parantaa suorituskykyä välttämällä sivuvikoja ja varmistamalla nopean tietojen käytön.",
@@ -386,7 +387,7 @@
 "Enter description": "Kirjoita kuvaus",
 "Enter Document Intelligence Endpoint": "",
 "Enter Document Intelligence Key": "",
-"Enter domains separated by commas (e.g., example.com,site.org)": "Verkko-osoitteet erotetaan pilkulla (esim. esimerkki.com,sivu.org",
+"Enter domains separated by commas (e.g., example.com,site.org)": "Verkko-osoitteet erotetaan pilkulla (esim. esimerkki.com,sivu.org)",
 "Enter Exa API Key": "Kirjoita Exa API -avain",
 "Enter Github Raw URL": "Kirjoita Github Raw -verkko-osoite",
 "Enter Google PSE API Key": "Kirjoita Google PSE API -avain",
@@ -397,13 +398,13 @@
 "Enter Jupyter Token": "Kirjoita Juypyter token",
 "Enter Jupyter URL": "Kirjoita Jupyter verkko-osoite",
 "Enter Kagi Search API Key": "Kirjoita Kagi Search API -avain",
-"Enter Key Behavior": "",
+"Enter Key Behavior": "Enter näppäimen käyttäytyminen",
 "Enter language codes": "Kirjoita kielikoodit",
 "Enter Model ID": "Kirjoita mallitunnus",
 "Enter model tag (e.g. {{modelTag}})": "Kirjoita mallitagi (esim. {{modelTag}})",
 "Enter Mojeek Search API Key": "Kirjoita Mojeek Search API -avain",
 "Enter Number of Steps (e.g. 50)": "Kirjoita askelten määrä (esim. 50)",
-"Enter Perplexity API Key": "",
+"Enter Perplexity API Key": "Aseta Perplexity API-avain",
 "Enter proxy URL (e.g. https://user:password@host:port)": "Kirjoita välityspalvelimen verkko-osoite (esim. https://käyttäjä:salasana@host:portti)",
 "Enter reasoning effort": "",
 "Enter Sampler (e.g. Euler a)": "Kirjoita näytteistäjä (esim. Euler a)",
@@ -427,7 +428,7 @@
 "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Kirjoita julkinen WebUI verkko-osoitteesi. Verkko-osoitetta käytetään osoitteiden luontiin ilmoituksissa.",
 "Enter Tika Server URL": "Kirjoita Tika Server URL",
 "Enter timeout in seconds": "Aseta aikakatkaisu sekunneissa",
-"Enter to Send": "",
+"Enter to Send": "Enter lähettääksesi",
 "Enter Top K": "Kirjoita Top K",
 "Enter URL (e.g. http://127.0.0.1:7860/)": "Kirjoita verkko-osoite (esim. http://127.0.0.1:7860/)",
 "Enter URL (e.g. http://localhost:11434)": "Kirjoita verkko-osoite (esim. http://localhost:11434)",
@@ -454,10 +455,10 @@
 "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "",
 "Exclude": "Jätä pois",
 "Execute code for analysis": "Suorita koodi analysointia varten",
-"Expand": "",
+"Expand": "Laajenna",
 "Experimental": "Kokeellinen",
-"Explain": "",
-"Explain this section to me in more detail": "",
+"Explain": "Selitä",
+"Explain this section to me in more detail": "Selitä tämä osio minulle tarkemmin",
 "Explore the cosmos": "Tutki avaruutta",
 "Export": "Vie",
 "Export All Archived Chats": "Vie kaikki arkistoidut keskustelut",
@@ -530,7 +531,7 @@
 "General": "Yleinen",
 "Generate an image": "Luo kuva",
 "Generate Image": "Luo kuva",
-"Generate prompt pair": "",
+"Generate prompt pair": "Luo kehotepari",
 "Generating search query": "Luodaan hakukyselyä",
 "Get started": "Aloita",
 "Get started with {{WEBUI_NAME}}": "Aloita käyttämään {{WEBUI_NAME}}:iä",
@@ -583,10 +584,11 @@
 "Include `--api` flag when running stable-diffusion-webui": "Sisällytä `--api`-lippu ajettaessa stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Tiedot",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Syötekäskyt",
 "Install from Github URL": "Asenna Github-URL:stä",
 "Instant Auto-Send After Voice Transcription": "Heti automaattinen lähetys äänitunnistuksen jälkeen",
-"Integration": "",
+"Integration": "Integrointi",
 "Interface": "Käyttöliittymä",
 "Invalid file format.": "Virheellinen tiedostomuoto.",
 "Invalid Tag": "Virheellinen tagi",
@@ -634,11 +636,11 @@
 "Listening...": "Kuuntelee...",
 "Llama.cpp": "Llama.cpp",
 "LLMs can make mistakes. Verify important information.": "Kielimallit voivat tehdä virheitä. Tarkista tärkeät tiedot.",
-"Loader": "",
+"Loader": "Lataaja",
 "Loading Kokoro.js...": "Ladataan Kokoro.js...",
 "Local": "Paikallinen",
 "Local Models": "Paikalliset mallit",
-"Location access not allowed": "",
+"Location access not allowed": "Ei pääsyä sijaintitietoihin",
 "Logit Bias": "",
 "Lost": "Mennyt",
 "LTR": "LTR",
@@ -713,7 +715,7 @@
 "No HTML, CSS, or JavaScript content found.": "HTML-, CSS- tai JavaScript-sisältöä ei löytynyt.",
 "No inference engine with management support found": "",
 "No knowledge found": "Tietoa ei löytynyt",
-"No memories to clear": "",
+"No memories to clear": "Ei muistia tyhjennettäväksi",
 "No model IDs": "Ei mallitunnuksia",
 "No models found": "Malleja ei löytynyt",
 "No models selected": "Malleja ei ole valittu",
@@ -743,7 +745,7 @@
 "Ollama API settings updated": "Ollama API -asetukset päivitetty",
 "Ollama Version": "Ollama-versio",
 "On": "Päällä",
-"OneDrive": "",
+"OneDrive": "OneDrive",
 "Only alphanumeric characters and hyphens are allowed": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja",
 "Only alphanumeric characters and hyphens are allowed in the command string.": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja komentosarjassa.",
 "Only collections can be edited, create a new knowledge base to edit/add documents.": "Vain kokoelmia voi muokata, luo uusi tietokanta muokataksesi/lisätäksesi asiakirjoja.",
@@ -780,7 +782,7 @@
 "Permission denied when accessing microphone": "Käyttöoikeus evätty mikrofonille",
 "Permission denied when accessing microphone: {{error}}": "Käyttöoikeus evätty mikrofonille: {{error}}",
 "Permissions": "Käyttöoikeudet",
-"Perplexity API Key": "",
+"Perplexity API Key": "Perplexity API-avain",
 "Personalization": "Personointi",
 "Pin": "Kiinnitä",
 "Pinned": "Kiinnitetty",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "Edelliset 30 päivää",
 "Previous 7 days": "Edelliset 7 päivää",
+"Private": "",
 "Profile Image": "Profiilikuva",
 "Prompt": "Kehote",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Kehote (esim. Kerro hauska fakta Rooman valtakunnasta)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "Kehote päivitetty onnistuneesti",
 "Prompts": "Kehotteet",
 "Prompts Access": "Kehoitteiden käyttöoikeudet",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Lataa \"{{searchValue}}\" Ollama.comista",
 "Pull a model from Ollama.com": "Lataa malli Ollama.comista",
 "Query Generation Prompt": "Kyselytulosten luontikehote",
@@ -852,7 +856,7 @@
 "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Vastausilmoituksia ei voida ottaa käyttöön, koska verkkosivuston käyttöoikeudet on evätty. Myönnä tarvittavat käyttöoikeudet selaimesi asetuksista.",
 "Response splitting": "Vastauksen jakaminen",
 "Result": "Tulos",
-"Retrieval": "",
+"Retrieval": "Haku",
 "Retrieval Query Generation": "Hakukyselyn luominen",
 "Rich Text Input for Chat": "Rikasteksti-syöte chattiin",
 "RK": "RK",
@@ -972,7 +976,7 @@
 "Subtitle (e.g. about the Roman Empire)": "Alaotsikko (esim. Rooman valtakunta)",
 "Success": "Onnistui",
 "Successfully updated.": "Päivitetty onnistuneesti.",
-"Suggested": "Ehdotettu",
+"Suggested": "Ehdotukset",
 "Support": "Tuki",
 "Support this plugin:": "Tue tätä lisäosaa:",
 "Sync directory": "Synkronoitu hakemisto",
@@ -982,7 +986,7 @@
 "Tags Generation": "Tagien luonti",
 "Tags Generation Prompt": "Tagien luontikehote",
 "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
-"Talk to model": "",
+"Talk to model": "Puhu mallille",
 "Tap to interrupt": "Napauta keskeyttääksesi",
 "Tasks": "Tehtävät",
 "Tavily API Key": "Tavily API -avain",
@@ -1009,6 +1013,7 @@
 "Theme": "Teema",
 "Thinking...": "Ajattelee...",
 "This action cannot be undone. Do you wish to continue?": "Tätä toimintoa ei voi peruuttaa. Haluatko jatkaa?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. Kiitos!",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1024,7 +1029,7 @@
 "Thought for {{DURATION}}": "Ajatteli {{DURATION}}",
 "Thought for {{DURATION}} seconds": "Ajatteli {{DURATION}} sekunttia",
 "Tika": "Tika",
-"Tika Server URL required.": "Tika Server URL vaaditaan.",
+"Tika Server URL required.": "Tika palvelimen verkko-osoite vaaditaan.",
 "Tiktoken": "Tiktoken",
 "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Vinkki: Päivitä useita muuttujapaikkoja peräkkäin painamalla tabulaattoria keskustelusyötteessä jokaisen korvauksen jälkeen.",
 "Title": "Otsikko",
@@ -1067,7 +1072,7 @@
 "Top P": "Top P",
 "Transformers": "Muunnokset",
 "Trouble accessing Ollama?": "Ongelmia Ollama-yhteydessä?",
-"Trust Proxy Environment": "",
+"Trust Proxy Environment": "Luota välityspalvelimen ympäristöön",
 "TTS Model": "Puhesynteesimalli",
 "TTS Settings": "Puhesynteesiasetukset",
 "TTS Voice": "Puhesynteesiääni",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Venttiilit päivitetty onnistuneesti",
 "variable": "muuttuja",
 "variable to have them replaced with clipboard content.": "muuttuja korvataan leikepöydän sisällöllä.",
+"Verify Connection": "",
 "Version": "Versio",
 "Version {{selectedVersion}} of {{totalVersions}}": "Versio {{selectedVersion}} / {{totalVersions}}",
 "View Replies": "Näytä vastaukset",
@@ -1173,6 +1179,6 @@
 "Your account status is currently pending activation.": "Tilisi tila on tällä hetkellä odottaa aktivointia.",
 "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Koko panoksesi menee suoraan lisäosan kehittäjälle; Open WebUI ei pidätä prosenttiosuutta. Valittu rahoitusalusta voi kuitenkin periä omia maksujaan.",
 "Youtube": "YouTube",
-"Youtube Language": "",
-"Youtube Proxy URL": ""
+"Youtube Language": "Youtube kieli",
+"Youtube Proxy URL": "Youtube-välityspalvelimen verkko-osoite"
 }

@@ -270,6 +270,7 @@
 "Default Prompt Suggestions": "Suggestions de prompts par défaut",
 "Default to 389 or 636 if TLS is enabled": "",
 "Default to ALL": "",
+"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 "Default User Role": "Rôle utilisateur par défaut",
 "Delete": "Supprimer",
 "Delete a model": "Supprimer un modèle",
@@ -583,6 +584,7 @@
 "Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
 "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 "Info": "Info",
+"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 "Input commands": "Entrez les commandes",
 "Install from Github URL": "Installer depuis l'URL GitHub",
 "Instant Auto-Send After Voice Transcription": "Envoi automatique instantané après transcription vocale",
@@ -806,6 +808,7 @@
 "Presence Penalty": "",
 "Previous 30 days": "30 derniers jours",
 "Previous 7 days": "7 derniers jours",
+"Private": "",
 "Profile Image": "Image de profil",
 "Prompt": "",
 "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
@@ -815,6 +818,7 @@
 "Prompt updated successfully": "",
 "Prompts": "Prompts",
 "Prompts Access": "",
+"Public": "",
 "Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 "Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 "Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 "Theme": "Thème",
 "Thinking...": "En train de réfléchir...",
 "This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
+"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 "Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 "variable": "variable",
 "variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
+"Verify Connection": "",
 "Version": "Version améliorée",
 "Version {{selectedVersion}} of {{totalVersions}}": "",
 "View Replies": "",

@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Suggestions de prompts par défaut",
"Default to 389 or 636 if TLS is enabled": "Par défaut à 389 ou 636 si TLS est activé",
"Default to ALL": "Par défaut à TOUS",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Rôle utilisateur par défaut",
"Delete": "Supprimer",
"Delete a model": "Supprimer un modèle",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Info",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Commandes d'entrée",
"Install from Github URL": "Installer depuis une URL GitHub",
"Instant Auto-Send After Voice Transcription": "Envoi automatique après la transcription",
@ -806,6 +808,7 @@
"Presence Penalty": "Pénalité de présence",
"Previous 30 days": "30 derniers jours",
"Previous 7 days": "7 derniers jours",
"Private": "",
"Profile Image": "Image de profil",
"Prompt": "Prompt",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "Prompt mis à jour avec succès",
"Prompts": "Prompts",
"Prompts Access": "Accès aux prompts",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
"Query Generation Prompt": "Prompt de génération de requête",
@ -1009,6 +1013,7 @@
"Theme": "Thème",
"Thinking...": "En train de réfléchir...",
"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
"variable": "variable",
"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
"Verify Connection": "",
"Version": "Version:",
"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} de {{totalVersions}}",
"View Replies": "Voir les réponses",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "הצעות ברירת מחדל לפקודות",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "תפקיד משתמש ברירת מחדל",
"Delete": "מחק",
"Delete a model": "מחק מודל",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "כלול את הדגל `--api` בעת הרצת stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "מידע",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "פקודות קלט",
"Install from Github URL": "התקן מכתובת URL של Github",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "30 הימים הקודמים",
"Previous 7 days": "7 הימים הקודמים",
"Private": "",
"Profile Image": "תמונת פרופיל",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "פקודה (למשל, ספר לי עובדה מעניינת על האימפריה הרומית)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "פקודות",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "משוך \"{{searchValue}}\" מ-Ollama.com",
"Pull a model from Ollama.com": "משוך מודל מ-Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "נושא",
"Thinking...": "",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "פעולה זו מבטיחה שהשיחות בעלות הערך שלך יישמרו באופן מאובטח במסד הנתונים העורפי שלך. תודה!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "משתנה",
"variable to have them replaced with clipboard content.": "משתנה להחליפו ב- clipboard תוכן.",
"Verify Connection": "",
"Version": "גרסה",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "डिफ़ॉल्ट प्रॉम्प्ट सुझाव",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "डिफ़ॉल्ट उपयोगकर्ता भूमिका",
"Delete": "डिलीट",
"Delete a model": "एक मॉडल हटाएँ",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui चलाते समय `--api` ध्वज शामिल करें",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "सूचना-विषयक",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "इनपुट क命",
"Install from Github URL": "Github URL से इंस्टॉल करें",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "पिछले 30 दिन",
"Previous 7 days": "पिछले 7 दिन",
"Private": "",
"Profile Image": "प्रोफ़ाइल छवि",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "प्रॉम्प्ट (उदाहरण के लिए मुझे रोमन साम्राज्य के बारे में एक मजेदार तथ्य बताएं)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "प्रॉम्प्ट",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" को Ollama.com से खींचें",
"Pull a model from Ollama.com": "Ollama.com से एक मॉडल खींचें",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "थीम",
"Thinking...": "",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "यह सुनिश्चित करता है कि आपकी मूल्यवान बातचीत आपके बैकएंड डेटाबेस में सुरक्षित रूप से सहेजी गई है। धन्यवाद!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "वेरिएबल",
"variable to have them replaced with clipboard content.": "उन्हें क्लिपबोर्ड सामग्री से बदलने के लिए वेरिएबल।",
"Verify Connection": "",
"Version": "संस्करण",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Zadani prijedlozi prompta",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Zadana korisnička uloga",
"Delete": "Izbriši",
"Delete a model": "Izbriši model",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Uključite zastavicu `--api` prilikom pokretanja stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Informacije",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Unos naredbi",
"Install from Github URL": "Instaliraj s Github URL-a",
"Instant Auto-Send After Voice Transcription": "Trenutačno automatsko slanje nakon glasovne transkripcije",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "Prethodnih 30 dana",
"Previous 7 days": "Prethodnih 7 dana",
"Private": "",
"Profile Image": "Profilna slika",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (npr. Reci mi zanimljivost o Rimskom carstvu)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Prompti",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Povucite \"{{searchValue}}\" s Ollama.com",
"Pull a model from Ollama.com": "Povucite model s Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "Razmišljam",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ovo osigurava da su vaši vrijedni razgovori sigurno spremljeni u bazu podataka. Hvala vam!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ovo je eksperimentalna značajka, možda neće funkcionirati prema očekivanjima i podložna je promjenama u bilo kojem trenutku.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "varijabla",
"variable to have them replaced with clipboard content.": "varijabla za zamjenu sadržajem međuspremnika.",
"Verify Connection": "",
"Version": "Verzija",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Alapértelmezett prompt javaslatok",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Alapértelmezett felhasználói szerep",
"Delete": "Törlés",
"Delete a model": "Modell törlése",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Add hozzá a `--api` kapcsolót a stable-diffusion-webui futtatásakor",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Információ",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Beviteli parancsok",
"Install from Github URL": "Telepítés Github URL-ről",
"Instant Auto-Send After Voice Transcription": "Azonnali automatikus küldés hangfelismerés után",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "Előző 30 nap",
"Previous 7 days": "Előző 7 nap",
"Private": "",
"Profile Image": "Profilkép",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (pl. Mondj egy érdekes tényt a Római Birodalomról)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Promptok",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" letöltése az Ollama.com-ról",
"Pull a model from Ollama.com": "Modell letöltése az Ollama.com-ról",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Téma",
"Thinking...": "Gondolkodik...",
"This action cannot be undone. Do you wish to continue?": "Ez a művelet nem vonható vissza. Szeretné folytatni?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ez biztosítja, hogy értékes beszélgetései biztonságosan mentésre kerüljenek a backend adatbázisban. Köszönjük!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ez egy kísérleti funkció, lehet, hogy nem a várt módon működik és bármikor változhat.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Szelepek sikeresen frissítve",
"variable": "változó",
"variable to have them replaced with clipboard content.": "változó, hogy a vágólap tartalmával helyettesítse őket.",
"Verify Connection": "",
"Version": "Verzió",
"Version {{selectedVersion}} of {{totalVersions}}": "{{selectedVersion}}. verzió a {{totalVersions}}-ból",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Saran Permintaan Default",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Peran Pengguna Default",
"Delete": "Menghapus",
"Delete a model": "Menghapus model",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `--api` saat menjalankan stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Info",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Perintah masukan",
"Install from Github URL": "Instal dari URL Github",
"Instant Auto-Send After Voice Transcription": "Kirim Otomatis Instan Setelah Transkripsi Suara",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "30 hari sebelumnya",
"Previous 7 days": "7 hari sebelumnya",
"Private": "",
"Profile Image": "Gambar Profil",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Permintaan (mis. Ceritakan sebuah fakta menarik tentang Kekaisaran Romawi)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Prompt",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{searchValue}}\" dari Ollama.com",
"Pull a model from Ollama.com": "Tarik model dari Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "Berpikir",
"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak dapat dibatalkan. Apakah Anda ingin melanjutkan?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahwa percakapan Anda yang berharga disimpan dengan aman ke basis data backend. Terima kasih!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ini adalah fitur eksperimental, mungkin tidak berfungsi seperti yang diharapkan dan dapat berubah sewaktu-waktu.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Katup berhasil diperbarui",
"variable": "variabel",
"variable to have them replaced with clipboard content.": "variabel untuk diganti dengan konten papan klip.",
"Verify Connection": "",
"Version": "Versi",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Moltaí Leid Réamhshocraithe",
"Default to 389 or 636 if TLS is enabled": "Réamhshocrú go 389 nó 636 má tá TLS cumasaithe",
"Default to ALL": "Réamhshocrú do GACH",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Ról Úsáideora Réamhshocraithe",
"Delete": "Scrios",
"Delete a model": "Scrios múnla",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Cuir bratach `--api` san áireamh agus webui cobhsaí-scaipthe á rith",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Eolas",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Orduithe ionchuir",
"Install from Github URL": "Suiteáil ó Github URL",
"Instant Auto-Send After Voice Transcription": "Seoladh Uathoibríoch Láithreach Tar éis",
@ -806,6 +808,7 @@
"Presence Penalty": "Pionós Láithreacht",
"Previous 30 days": "30 lá roimhe seo",
"Previous 7 days": "7 lá roimhe seo",
"Private": "",
"Profile Image": "Íomhá Próifíl",
"Prompt": "Leid",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Leid (m.sh. inis dom fíric spraíúil faoin Impireacht Rómhánach)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "D'éirigh leis an leid a nuashonrú",
"Prompts": "Leabhair",
"Prompts Access": "Rochtain ar Chuirí",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Tarraing \"{{searchValue}}\" ó Ollama.com",
"Pull a model from Ollama.com": "Tarraing múnla ó Ollama.com",
"Query Generation Prompt": "Cuirí Ginearáil Ceisteanna",
@ -1009,6 +1013,7 @@
"Theme": "Téama",
"Thinking...": "Ag smaoineamh...",
"This action cannot be undone. Do you wish to continue?": "Ní féidir an gníomh seo a chur ar ais. Ar mhaith leat leanúint ar aghaidh?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cinntíonn sé seo go sábhálfar do chomhráite luachmhara go daingean i do bhunachar sonraí cúltaca Go raibh maith agat!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Is gné turgnamhach í seo, b'fhéidir nach bhfeidhmeoidh sé mar a bhíothas ag súil leis agus tá sé faoi réir athraithe ag am ar bith.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Comhlaí nuashonraíodh",
"variable": "athraitheach",
"variable to have them replaced with clipboard content.": "athróg chun ábhar gearrthaisce a chur in ionad iad.",
"Verify Connection": "",
"Version": "Leagan",
"Version {{selectedVersion}} of {{totalVersions}}": "Leagan {{selectedVersion}} de {{totalVersions}}",
"View Replies": "Féach ar Fhreagraí",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Suggerimenti prompt predefiniti",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Ruolo utente predefinito",
"Delete": "Elimina",
"Delete a model": "Elimina un modello",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Includi il flag `--api` quando esegui stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Informazioni",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Comandi di input",
"Install from Github URL": "Eseguire l'installazione dall'URL di Github",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "Ultimi 30 giorni",
"Previous 7 days": "Ultimi 7 giorni",
"Private": "",
"Profile Image": "Immagine del profilo",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ad esempio Dimmi un fatto divertente sull'Impero Romano)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Prompt",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Estrai \"{{searchValue}}\" da Ollama.com",
"Pull a model from Ollama.com": "Estrai un modello da Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ciò garantisce che le tue preziose conversazioni siano salvate in modo sicuro nel tuo database backend. Grazie!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "variabile",
"variable to have them replaced with clipboard content.": "variabile per farli sostituire con il contenuto degli appunti.",
"Verify Connection": "",
"Version": "Versione",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "デフォルトのプロンプトの提案",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "デフォルトのユーザー役割",
"Delete": "削除",
"Delete a model": "モデルを削除",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webuiを実行する際に`--api`フラグを含める",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "情報",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "入力コマンド",
"Install from Github URL": "Github URLからインストール",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "前の30日間",
"Previous 7 days": "前の7日間",
"Private": "",
"Profile Image": "プロフィール画像",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "プロンプト(例:ローマ帝国についての楽しい事を教えてください)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "プロンプト",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com から \"{{searchValue}}\" をプル",
"Pull a model from Ollama.com": "Ollama.com からモデルをプル",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "テーマ",
"Thinking...": "思考中...",
"This action cannot be undone. Do you wish to continue?": "このアクションは取り消し不可です。続けますか?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "変数",
"variable to have them replaced with clipboard content.": "クリップボードの内容に置き換える変数。",
"Verify Connection": "",
"Version": "バージョン",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "ნაგულისხმევი მოთხოვნის მინიშნებები",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "მომხმარებლის ნაგულისხმევი როლი",
"Delete": "წაშლა",
"Delete a model": "მოდელის წაშლა",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "`--api` ალმის ჩასმა stable-diffusion-webui-ის გამოყენებისას",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "ინფორმაცია",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "შეიყვანეთ ბრძანებები",
"Install from Github URL": "დაყენება Github-ის ბმულიდან",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "წინა 30 დღე",
"Previous 7 days": "წინა 7 დღე",
"Private": "",
"Profile Image": "პროფილის სურათი",
"Prompt": "ბრძანების შეყვანის შეხსენება",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (მაგ. მითხარი სახალისო ფაქტი რომის იმპერიის შესახებ)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "მოთხოვნები",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\"-ის გადმოწერა Ollama.com-იდან",
"Pull a model from Ollama.com": "მოდელის გადმოწერა Ollama.com-დან",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "თემა",
"Thinking...": "",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "ცვლადი",
"variable to have them replaced with clipboard content.": "ცვლადი მისი ბუფერის მნიშვნელობით ჩასანაცვლებლად.",
"Verify Connection": "",
"Version": "ვერსია",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "기본 프롬프트 제안",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "기본 사용자 역할",
"Delete": "삭제",
"Delete a model": "모델 삭제",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api` 플래그를 포함하세요",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "정보",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "명령어 입력",
"Install from Github URL": "Github URL에서 설치",
"Instant Auto-Send After Voice Transcription": "음성 변환 후 즉시 자동 전송",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "이전 30일",
"Previous 7 days": "이전 7일",
"Private": "",
"Profile Image": "프로필 이미지",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "프롬프트 (예: 로마 황제에 대해 재미있는 사실을 알려주세요)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "성공적으로 프롬프트를 수정했습니다",
"Prompts": "프롬프트",
"Prompts Access": "프롬프트 접근",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com에서 \"{{searchValue}}\" 가져오기",
"Pull a model from Ollama.com": "Ollama.com에서 모델 가져오기(pull)",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "테마",
"Thinking...": "생각 중...",
"This action cannot be undone. Do you wish to continue?": "이 액션은 되돌릴 수 없습니다. 계속 하시겠습니까?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "이것은 실험적 기능으로, 예상대로 작동하지 않을 수 있으며 언제든지 변경될 수 있습니다.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "성공적으로 밸브가 업데이트되었습니다",
"variable": "변수",
"variable to have them replaced with clipboard content.": "변수를 사용하여 클립보드 내용으로 바꾸세요.",
"Verify Connection": "",
"Version": "버전",
"Version {{selectedVersion}} of {{totalVersions}}": "버전 {{totalVersions}}의 {{selectedVersion}}",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Numatytieji užklausų pasiūlymai",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Numatytoji naudotojo rolė",
"Delete": "ištrinti",
"Delete a model": "Ištrinti modėlį",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Pridėti `--api` kai vykdomas stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Informacija",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Įvesties komandos",
"Install from Github URL": "Instaliuoti Github nuorodą",
"Instant Auto-Send After Voice Transcription": "Siųsti iškart po balso transkripcijos",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "Paskutinės 30 dienų",
"Previous 7 days": "Paskutinės 7 dienos",
"Private": "",
"Profile Image": "Profilio nuotrauka",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Užklausa (pvz. supaprastink šį laišką)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Užklausos",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Rasti \"{{searchValue}}\" iš Ollama.com",
"Pull a model from Ollama.com": "Gauti modelį iš Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "Mąsto...",
"This action cannot be undone. Do you wish to continue?": "Šis veiksmas negali būti atšauktas. Ar norite tęsti?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tai užtikrina, kad Jūsų pokalbiai saugiai saugojami duomenų bazėje. Ačiū!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tai eksperimentinė funkcija ir gali veikti nevisada.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Įeitys atnaujintos sėkmingai",
"variable": "kintamasis",
"variable to have them replaced with clipboard content.": "kintamoji pakeičiama kopijuoklės turiniu.",
"Verify Connection": "",
"Version": "Versija",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Cadangan Gesaan Lalai",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Peranan Pengguna Lalai",
"Delete": "Padam",
"Delete a model": "Padam Model",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `-- api ` semasa menjalankan stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Maklumat",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Masukkan Arahan",
"Install from Github URL": "Pasang daripada URL Github",
"Instant Auto-Send After Voice Transcription": "Hantar Secara Automatik Dengan Segera Selepas Transkripsi Suara",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "30 hari sebelumnya",
"Previous 7 days": "7 hari sebelumnya",
"Private": "",
"Profile Image": "Imej Profail",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Gesaan (cth Beritahu saya fakta yang menyeronokkan tentang Kesultanan Melaka)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "Gesaan",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{ searchValue }}\" daripada Ollama.com",
"Pull a model from Ollama.com": "Tarik model dari Ollama.com",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "Berfikir...",
"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak boleh diubah semula kepada asal. Adakah anda ingin teruskan",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahawa perbualan berharga anda disimpan dengan selamat ke pangkalan data 'backend' anda. Terima kasih!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "ni adalah ciri percubaan, ia mungkin tidak berfungsi seperti yang diharapkan dan tertakluk kepada perubahan pada bila-bila masa.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "'Valves' berjaya dikemaskini",
"variable": "pembolehubah",
"variable to have them replaced with clipboard content.": "pembolehubah untuk ia digantikan dengan kandungan papan klip.",
"Verify Connection": "",
"Version": "Versi",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Standard forslag til ledetekster",
"Default to 389 or 636 if TLS is enabled": "Velg 389 eller 636 som standard hvis TLS er aktivert",
"Default to ALL": "Velg ALL som standard",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Standard brukerrolle",
"Delete": "Slett",
"Delete a model": "Slett en modell",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Inkluder flagget --api når du kjører stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Info",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Inntast kommandoer",
"Install from Github URL": "Installer fra GitHub-URL",
"Instant Auto-Send After Voice Transcription": "Øyeblikkelig automatisk sending etter taletranskripsjon",
@ -806,6 +808,7 @@
"Presence Penalty": "Straff for opptreden",
"Previous 30 days": "Siste 30 dager",
"Previous 7 days": "Siste 7 dager",
"Private": "",
"Profile Image": "Profilbilde",
"Prompt": "Ledetekst",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Ledetekst (f.eks. Fortell meg noe morsomt om romerriket)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "Ledetekst oppdatert",
"Prompts": "Ledetekster",
"Prompts Access": "Tilgang til ledetekster",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Hent {{searchValue}} fra Ollama.com",
"Pull a model from Ollama.com": "Hent en modell fra Ollama.com",
"Query Generation Prompt": "Ledetekst for genering av spørringer",
@ -1009,6 +1013,7 @@
"Theme": "Tema",
"Thinking...": "Tenker ...",
"This action cannot be undone. Do you wish to continue?": "Denne handlingen kan ikke angres. Vil du fortsette?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer at de verdifulle samtalene dine lagres sikkert i backend-databasen din. Takk!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentell funksjon. Det er mulig den ikke fungerer som forventet, og den kan endres når som helst.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Ventilene er oppdatert",
"variable": "variabel",
"variable to have them replaced with clipboard content.": "variabel for å erstatte dem med utklippstavleinnhold.",
"Verify Connection": "",
"Version": "Versjon",
"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} av {{totalVersions}}",
"View Replies": "Vis svar",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Standaard Prompt Suggesties",
"Default to 389 or 636 if TLS is enabled": "Standaard 389 of 636 als TLS is ingeschakeld",
"Default to ALL": "Standaar op ALL",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Standaard gebruikersrol",
"Delete": "Verwijderen",
"Delete a model": "Verwijder een model",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Info",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Voer commando's in",
"Install from Github URL": "Installeren vanaf Github-URL",
"Instant Auto-Send After Voice Transcription": "Direct automatisch verzenden na spraaktranscriptie",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "Afgelopen 30 dagen",
"Previous 7 days": "Afgelopen 7 dagen",
"Private": "",
"Profile Image": "Profielafbeelding",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (bv. Vertel me een leuke gebeurtenis over het Romeinse Rijk)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "Prompt succesvol bijgewerkt",
"Prompts": "Prompts",
"Prompts Access": "Prompttoegang",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Haal \"{{searchValue}}\" uit Ollama.com",
"Pull a model from Ollama.com": "Haal een model van Ollama.com",
"Query Generation Prompt": "Vraaggeneratieprompt",
@ -1009,6 +1013,7 @@
"Theme": "Thema",
"Thinking...": "Aan het denken...",
"This action cannot be undone. Do you wish to continue?": "Deze actie kan niet ongedaan worden gemaakt. Wilt u doorgaan?",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Kleppen succesvol bijgewerkt",
"variable": "variabele",
"variable to have them replaced with clipboard content.": "variabele om ze te laten vervangen door klembord inhoud.",
"Verify Connection": "",
"Version": "Versie",
"Version {{selectedVersion}} of {{totalVersions}}": "Versie {{selectedVersion}} van {{totalVersions}}",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "ਮੂਲ ਪ੍ਰੰਪਟ ਸੁਝਾਅ",
"Default to 389 or 636 if TLS is enabled": "",
"Default to ALL": "",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "ਮੂਲ ਉਪਭੋਗਤਾ ਭੂਮਿਕਾ",
"Delete": "ਮਿਟਾਓ",
"Delete a model": "ਇੱਕ ਮਾਡਲ ਮਿਟਾਓ",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "ਸਟੇਬਲ-ਡਿਫਿਊਸ਼ਨ-ਵੈਬਯੂਆਈ ਚਲਾਉਣ ਸਮੇਂ `--api` ਝੰਡਾ ਸ਼ਾਮਲ ਕਰੋ",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "ਜਾਣਕਾਰੀ",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "ਇਨਪੁਟ ਕਮਾਂਡਾਂ",
"Install from Github URL": "Github URL ਤੋਂ ਇੰਸਟਾਲ ਕਰੋ",
"Instant Auto-Send After Voice Transcription": "",
@ -806,6 +808,7 @@
"Presence Penalty": "",
"Previous 30 days": "ਪਿਛਲੇ 30 ਦਿਨ",
"Previous 7 days": "ਪਿਛਲੇ 7 ਦਿਨ",
"Private": "",
"Profile Image": "ਪ੍ਰੋਫਾਈਲ ਚਿੱਤਰ",
"Prompt": "",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "ਪ੍ਰੰਪਟ (ਉਦਾਹਰਣ ਲਈ ਮੈਨੂੰ ਰੋਮਨ ਸਾਮਰਾਜ ਬਾਰੇ ਇੱਕ ਮਜ਼ੇਦਾਰ ਤੱਥ ਦੱਸੋ)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "",
"Prompts": "ਪ੍ਰੰਪਟ",
"Prompts Access": "",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ \"{{searchValue}}\" ਖਿੱਚੋ",
"Pull a model from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ ਇੱਕ ਮਾਡਲ ਖਿੱਚੋ",
"Query Generation Prompt": "",
@ -1009,6 +1013,7 @@
"Theme": "ਥੀਮ",
"Thinking...": "",
"This action cannot be undone. Do you wish to continue?": "",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ਇਹ ਯਕੀਨੀ ਬਣਾਉਂਦਾ ਹੈ ਕਿ ਤੁਹਾਡੀਆਂ ਕੀਮਤੀ ਗੱਲਾਂ ਤੁਹਾਡੇ ਬੈਕਐਂਡ ਡਾਟਾਬੇਸ ਵਿੱਚ ਸੁਰੱਖਿਅਤ ਤੌਰ 'ਤੇ ਸੰਭਾਲੀਆਂ ਗਈਆਂ ਹਨ। ਧੰਨਵਾਦ!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "",
"variable": "ਵੈਰੀਏਬਲ",
"variable to have them replaced with clipboard content.": "ਕਲਿੱਪਬੋਰਡ ਸਮੱਗਰੀ ਨਾਲ ਬਦਲਣ ਲਈ ਵੈਰੀਏਬਲ।",
"Verify Connection": "",
"Version": "ਵਰਜਨ",
"Version {{selectedVersion}} of {{totalVersions}}": "",
"View Replies": "",
@ -270,6 +270,7 @@
"Default Prompt Suggestions": "Domyślne propozycje wpisów",
"Default to 389 or 636 if TLS is enabled": "Domyślnie użyj 389 lub 636, jeśli TLS jest włączony",
"Default to ALL": "Domyślne dla wszystkich",
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
"Default User Role": "Domyślna rola użytkownika",
"Delete": "Usuń",
"Delete a model": "Usuń model",
@ -583,6 +584,7 @@
"Include `--api` flag when running stable-diffusion-webui": "Użyj flagi `--api` podczas uruchamiania stable-diffusion-webui.",
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
"Info": "Informacje",
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
"Input commands": "Wprowadź polecenia",
"Install from Github URL": "Instalacja z adresu URL serwisu Github",
"Instant Auto-Send After Voice Transcription": "Automatyczne natychmiastowe wysyłanie po transkrypcji głosowej",
@ -806,6 +808,7 @@
"Presence Penalty": "Kara za obecność",
"Previous 30 days": "Ostatnie 30 dni",
"Previous 7 days": "Ostatnie 7 dni",
"Private": "",
"Profile Image": "Zdjęcie profilowe",
"Prompt": "Wprowadź podpowiedź: ",
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (np. podaj ciekawostkę o Imperium Rzymskim)",
@ -815,6 +818,7 @@
"Prompt updated successfully": "Podpowiedź została zaktualizowana pomyślnie.",
"Prompts": "Podpowiedzi",
"Prompts Access": "Dostęp do podpowiedzi",
"Public": "",
"Pull \"{{searchValue}}\" from Ollama.com": "Pobierz \"{{searchValue}}\" z Ollama.com",
"Pull a model from Ollama.com": "Pobierz model z Ollama.com",
"Query Generation Prompt": "Podpowiedź do generowania zapytań",
@ -1009,6 +1013,7 @@
"Theme": "Motyw",
"Thinking...": "Myślę...",
"This action cannot be undone. Do you wish to continue?": "Czy na pewno chcesz kontynuować? Ta akcja nie może zostać cofnięta.",
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To gwarantuje, że Twoje wartościowe rozmowy są bezpiecznie zapisywane w bazie danych backendowej. Dziękujemy!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "To jest funkcja eksperymentalna, może nie działać zgodnie z oczekiwaniami i jest podatna na zmiany w dowolnym momencie.",
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@ -1118,6 +1123,7 @@
"Valves updated successfully": "Zawory zaktualizowane pomyślnie",
"variable": "zmienna",
"variable to have them replaced with clipboard content.": "Zmienna, która ma zostać zastąpiona zawartością schowka.",
"Verify Connection": "",
"Version": "Wersja",
"Version {{selectedVersion}} of {{totalVersions}}": "Wersja {{selectedVersion}} z {{totalVersions}}",
"View Replies": "Wyświetl odpowiedzi",
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Sugestões de Prompt Padrão",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "Padrão para TODOS",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Padrão para novos usuários",
|
||||
"Delete": "Excluir",
|
||||
"Delete a model": "Excluir um modelo",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Incluir a flag `--api` ao executar stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Informação",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Comandos de entrada",
|
||||
"Install from Github URL": "Instalar da URL do Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Envio Automático Instantâneo Após Transcrição de Voz",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Últimos 30 dias",
|
||||
"Previous 7 days": "Últimos 7 dias",
|
||||
"Private": "",
|
||||
"Profile Image": "Imagem de Perfil",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (por exemplo, Diga-me um fato divertido sobre o Império Romano)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "Prompt atualizado com sucesso",
|
||||
"Prompts": "Prompts",
|
||||
"Prompts Access": "Acessar prompts",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Obter \"{{searchValue}}\" de Ollama.com",
|
||||
"Pull a model from Ollama.com": "Obter um modelo de Ollama.com",
|
||||
"Query Generation Prompt": "Prompt de Geração de Consulta",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Tema",
|
||||
"Thinking...": "Pensando...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Esta ação não pode ser desfeita. Você deseja continuar?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isso garante que suas conversas valiosas sejam salvas com segurança no banco de dados do backend. Obrigado!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta é uma funcionalidade experimental, pode não funcionar como esperado e está sujeita a alterações a qualquer momento.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Válvulas atualizadas com sucesso",
|
||||
"variable": "variável",
|
||||
"variable to have them replaced with clipboard content.": "variável para ser substituída pelo conteúdo da área de transferência.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Versão",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Versão {{selectedVersion}} de {{totalVersions}}",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Sugestões de Prompt Padrão",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Função de Utilizador Padrão",
|
||||
"Delete": "Apagar",
|
||||
"Delete a model": "Apagar um modelo",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Informação",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Comandos de entrada",
|
||||
"Install from Github URL": "Instalar a partir do URL do Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Enviar automaticamente depois da transcrição da voz",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Últimos 30 dias",
|
||||
"Previous 7 days": "Últimos 7 dias",
|
||||
"Private": "",
|
||||
"Profile Image": "Imagem de Perfil",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ex.: Dê-me um facto divertido sobre o Império Romano)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Prompts",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Puxar \"{{searchValue}}\" do Ollama.com",
|
||||
"Pull a model from Ollama.com": "Puxar um modelo do Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Tema",
|
||||
"Thinking...": "A pensar...",
|
||||
"This action cannot be undone. Do you wish to continue?": "",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isto garante que suas conversas valiosas sejam guardadas com segurança na sua base de dados de backend. Obrigado!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Isto é um recurso experimental, pode não funcionar conforme o esperado e está sujeito a alterações a qualquer momento.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "",
|
||||
"variable": "variável",
|
||||
"variable to have them replaced with clipboard content.": "variável para que sejam substituídos pelo conteúdo da área de transferência.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Versão",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Sugestii de Prompt Implicite",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Rolul Implicit al Utilizatorului",
|
||||
"Delete": "Șterge",
|
||||
"Delete a model": "Șterge un model",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Includeți flag-ul `--api` când rulați stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Informații",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Comenzi de intrare",
|
||||
"Install from Github URL": "Instalează de la URL-ul Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Trimitere Automată Instantanee După Transcrierea Vocii",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Ultimele 30 de zile",
|
||||
"Previous 7 days": "Ultimele 7 zile",
|
||||
"Private": "",
|
||||
"Profile Image": "Imagine de Profil",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (de ex. Spune-mi un fapt amuzant despre Imperiul Roman)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Prompturi",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Extrage \"{{searchValue}}\" de pe Ollama.com",
|
||||
"Pull a model from Ollama.com": "Extrage un model de pe Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Temă",
|
||||
"Thinking...": "Gândește...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Această acțiune nu poate fi anulată. Doriți să continuați?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Acest lucru asigură că conversațiile dvs. valoroase sunt salvate în siguranță în baza de date a backend-ului dvs. Mulțumim!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aceasta este o funcție experimentală, poate să nu funcționeze așa cum vă așteptați și este supusă schimbării în orice moment.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Valve actualizate cu succes",
|
||||
"variable": "variabilă",
|
||||
"variable to have them replaced with clipboard content.": "variabilă pentru a fi înlocuite cu conținutul clipboard-ului.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Versiune",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Versiunea {{selectedVersion}} din {{totalVersions}}",
|
||||
"View Replies": "Vezi răspunsurile",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Предложения промптов по умолчанию",
|
||||
"Default to 389 or 636 if TLS is enabled": "По умолчанию 389 или 636, если TLS включен.",
|
||||
"Default to ALL": "По умолчанию ВСЕ",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Роль пользователя по умолчанию",
|
||||
"Delete": "Удалить",
|
||||
"Delete a model": "Удалить модель",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Информация",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Введите команды",
|
||||
"Install from Github URL": "Установка с URL-адреса Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Мгновенная автоматическая отправка после расшифровки голоса",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Предыдущие 30 дней",
|
||||
"Previous 7 days": "Предыдущие 7 дней",
|
||||
"Private": "",
|
||||
"Profile Image": "Изображение профиля",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (например, Расскажи мне интересный факт о Римской империи)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Промпты",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Загрузить \"{{searchValue}}\" с Ollama.com",
|
||||
"Pull a model from Ollama.com": "Загрузить модель с Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Тема",
|
||||
"Thinking...": "Думаю...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Это действие нельзя отменить. Вы хотите продолжить?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. Спасибо!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Вентили успешно обновлены",
|
||||
"variable": "переменная",
|
||||
"variable to have them replaced with clipboard content.": "переменную, чтобы заменить их содержимым буфера обмена.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Версия",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Predvolené návrhy promptov",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Predvolená rola užívateľa",
|
||||
"Delete": "Odstrániť",
|
||||
"Delete a model": "Odstrániť model.",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Pri spustení stable-diffusion-webui zahrňte príznak `--api`.",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Info",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Vstupné príkazy",
|
||||
"Install from Github URL": "Inštalácia z URL adresy Githubu",
|
||||
"Instant Auto-Send After Voice Transcription": "Okamžité automatické odoslanie po prepisu hlasu",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Predchádzajúcich 30 dní",
|
||||
"Previous 7 days": "Predchádzajúcich 7 dní",
|
||||
"Private": "",
|
||||
"Profile Image": "Profilový obrázok",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (napr. Povedz mi zábavnú skutočnosť o Rímskej ríši)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Prompty",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Stiahnite \"{{searchValue}}\" z Ollama.com",
|
||||
"Pull a model from Ollama.com": "Stiahnite model z Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Téma",
|
||||
"Thinking...": "Premýšľam...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Túto akciu nie je možné vrátiť späť. Prajete si pokračovať?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. Ďakujeme!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Ventily boli úspešne aktualizované.",
|
||||
"variable": "premenná",
|
||||
"variable to have them replaced with clipboard content.": "premennú, aby bol ich obsah nahradený obsahom schránky.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Verzia",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Verzia {{selectedVersion}} z {{totalVersions}}",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Подразумевани предлози упита",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Подразумевана улога корисника",
|
||||
"Delete": "Обриши",
|
||||
"Delete a model": "Обриши модел",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Укључи `--api` заставицу при покретању stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Инфо",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Унеси наредбе",
|
||||
"Install from Github URL": "Инсталирај из Гитхуб УРЛ адресе",
|
||||
"Instant Auto-Send After Voice Transcription": "",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Претходних 30 дана",
|
||||
"Previous 7 days": "Претходних 7 дана",
|
||||
"Private": "",
|
||||
"Profile Image": "Слика профила",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Упит (нпр. „подели занимљивост о Римском царству“)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "Упит измењен успешно",
|
||||
"Prompts": "Упити",
|
||||
"Prompts Access": "Приступ упитима",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Повуците \"{{searchValue}}\" са Ollama.com",
|
||||
"Pull a model from Ollama.com": "Повуците модел са Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Тема",
|
||||
"Thinking...": "Размишљам...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Ова радња се не може опозвати. Да ли желите наставити?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Вентили успешно ажурирани",
|
||||
"variable": "променљива",
|
||||
"variable to have them replaced with clipboard content.": "променљива за замену са садржајем оставе.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Издање",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "Погледај одговоре",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Standardinstruktionsförslag",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Standardanvändarroll",
|
||||
"Delete": "Radera",
|
||||
"Delete a model": "Ta bort en modell",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Inkludera flaggan `--api` när du kör stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Information",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Indatakommandon",
|
||||
"Install from Github URL": "Installera från Github-URL",
|
||||
"Instant Auto-Send After Voice Transcription": "Skicka automatiskt efter rösttranskribering",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Föregående 30 dagar",
|
||||
"Previous 7 days": "Föregående 7 dagar",
|
||||
"Private": "",
|
||||
"Profile Image": "Profilbild",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Instruktion (t.ex. Berätta en kuriosa om Romerska Imperiet)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Instruktioner",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Ladda ner \"{{searchValue}}\" från Ollama.com",
|
||||
"Pull a model from Ollama.com": "Ladda ner en modell från Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Tema",
|
||||
"Thinking...": "Tänker...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Denna åtgärd kan inte ångras. Vill du fortsätta?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "",
|
||||
"variable": "variabel",
|
||||
"variable to have them replaced with clipboard content.": "variabel för att få dem ersatta med urklippsinnehåll.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Version",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} av {{totalVersions}}",
|
||||
"View Replies": "Se svar",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "คำแนะนำพรอมต์ค่าเริ่มต้น",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "บทบาทผู้ใช้ค่าเริ่มต้น",
|
||||
"Delete": "ลบ",
|
||||
"Delete a model": "ลบโมเดล",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "รวมแฟลก `--api` เมื่อเรียกใช้ stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "ข้อมูล",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "คำสั่งป้อนข้อมูล",
|
||||
"Install from Github URL": "ติดตั้งจาก URL ของ Github",
|
||||
"Instant Auto-Send After Voice Transcription": "ส่งอัตโนมัติทันทีหลังจากการถอดเสียง",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "30 วันที่ผ่านมา",
|
||||
"Previous 7 days": "7 วันที่ผ่านมา",
|
||||
"Private": "",
|
||||
"Profile Image": "รูปโปรไฟล์",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "พรอมต์ (เช่น บอกข้อเท็จจริงที่น่าสนุกเกี่ยวกับจักรวรรดิโรมัน)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "พรอมต์",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "",
|
||||
"Pull a model from Ollama.com": "",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "ธีม",
|
||||
"Thinking...": "กำลังคิด...",
|
||||
"This action cannot be undone. Do you wish to continue?": "การกระทำนี้ไม่สามารถย้อนกลับได้ คุณต้องการดำเนินการต่อหรือไม่?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "อัปเดตวาล์วเรียบร้อยแล้ว",
|
||||
"variable": "ตัวแปร",
|
||||
"variable to have them replaced with clipboard content.": "ตัวแปรเพื่อให้แทนที่ด้วยเนื้อหาคลิปบอร์ด",
|
||||
"Verify Connection": "",
|
||||
"Version": "เวอร์ชัน",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "",
|
||||
"Delete": "",
|
||||
"Delete a model": "",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "",
|
||||
"Install from Github URL": "",
|
||||
"Instant Auto-Send After Voice Transcription": "",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "",
|
||||
"Previous 7 days": "",
|
||||
"Private": "",
|
||||
"Profile Image": "",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "",
|
||||
"Pull a model from Ollama.com": "",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "",
|
||||
"Thinking...": "",
|
||||
"This action cannot be undone. Do you wish to continue?": "",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "",
|
||||
"variable": "",
|
||||
"variable to have them replaced with clipboard content.": "",
|
||||
"Verify Connection": "",
|
||||
"Version": "",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Varsayılan Prompt Önerileri",
|
||||
"Default to 389 or 636 if TLS is enabled": "TLS etkinse 389 veya 636'ya varsayılan olarak",
|
||||
"Default to ALL": "TÜMÜ'nü varsayılan olarak",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Varsayılan Kullanıcı Rolü",
|
||||
"Delete": "Sil",
|
||||
"Delete a model": "Bir modeli sil",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api` bayrağını dahil edin",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Bilgi",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Giriş komutları",
|
||||
"Install from Github URL": "Github URL'sinden yükleyin",
|
||||
"Instant Auto-Send After Voice Transcription": "Ses Transkripsiyonundan Sonra Anında Otomatik Gönder",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "Önceki 30 gün",
|
||||
"Previous 7 days": "Önceki 7 gün",
|
||||
"Private": "",
|
||||
"Profile Image": "Profil Fotoğrafı",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (örn. Roma İmparatorluğu hakkında ilginç bir bilgi verin)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "Prompt başarıyla güncellendi",
|
||||
"Prompts": "Promptlar",
|
||||
"Prompts Access": "Promptlara Erişim",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com'dan \"{{searchValue}}\" çekin",
|
||||
"Pull a model from Ollama.com": "Ollama.com'dan bir model çekin",
|
||||
"Query Generation Prompt": "Sorgu Oluşturma Promptu",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Tema",
|
||||
"Thinking...": "Düşünüyor...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Bu eylem geri alınamaz. Devam etmek istiyor musunuz?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Valvler başarıyla güncellendi",
|
||||
"variable": "değişken",
|
||||
"variable to have them replaced with clipboard content.": "panodaki içerikle değiştirilmesi için değişken.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Sürüm",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Sürüm {{selectedVersion}} / {{totalVersions}}",
|
||||
"View Replies": "Yanıtları Görüntüle",
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
{
|
||||
"-1 for no limit, or a positive integer for a specific limit": "-1 для без обмежень або додатне ціле число для конкретного обмеження",
|
||||
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' or '-1' для відсутності терміну дії.",
|
||||
"(e.g. `sh webui.sh --api --api-auth username_password`)": "(e.g. `sh webui.sh --api --api-auth username_password`)",
|
||||
"(e.g. `sh webui.sh --api`)": "(e.g. `sh webui.sh --api`)",
|
||||
"(e.g. `sh webui.sh --api --api-auth username_password`)": "(напр. `sh webui.sh --api --api-auth username_password`)",
|
||||
"(e.g. `sh webui.sh --api`)": "(напр. `sh webui.sh --api`)",
|
||||
"(latest)": "(остання)",
|
||||
"{{ models }}": "{{ models }}",
|
||||
"{{COUNT}} hidden lines": "",
|
||||
"{{COUNT}} hidden lines": "{{COUNT}} прихованих рядків",
|
||||
"{{COUNT}} Replies": "{{COUNT}} Відповіді",
|
||||
"{{user}}'s Chats": "Чати {{user}}а",
|
||||
"{{webUIName}} Backend Required": "Необхідно підключення бекенду {{webUIName}}",
|
||||
|
@ -14,7 +14,7 @@
|
|||
"A task model is used when performing tasks such as generating titles for chats and web search queries": "Модель задач використовується при виконанні таких завдань, як генерація заголовків для чатів та пошукових запитів в Інтернеті",
|
||||
"a user": "користувача",
|
||||
"About": "Про програму",
|
||||
"Accept autocomplete generation / Jump to prompt variable": "",
|
||||
"Accept autocomplete generation / Jump to prompt variable": "Прийняти автоматичне доповнення / Перейти до змінної промта",
|
||||
"Access": "Доступ",
|
||||
"Access Control": "Контроль доступу",
|
||||
"Accessible to all users": "Доступно всім користувачам",
|
||||
|
@ -22,7 +22,7 @@
|
|||
"Account Activation Pending": "Очікування активації облікового запису",
|
||||
"Accurate information": "Точна інформація",
|
||||
"Actions": "Дії",
|
||||
"Activate": "",
|
||||
"Activate": "Активувати",
|
||||
"Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Активуйте цю команду, ввівши \"/{{COMMAND}}\" у введення чату.",
|
||||
"Active Users": "Активні користувачі",
|
||||
"Add": "Додати",
|
||||
|
@ -52,9 +52,9 @@
|
|||
"Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Адміністратори мають доступ до всіх інструментів у будь-який час; користувачам потрібні інструменти, призначені для кожної моделі в робочій області.",
|
||||
"Advanced Parameters": "Розширені параметри",
|
||||
"Advanced Params": "Розширені параметри",
|
||||
"All": "",
|
||||
"All": "Усі",
|
||||
"All Documents": "Усі документи",
|
||||
"All models deleted successfully": "Всі моделі видалені успішно",
|
||||
"All models deleted successfully": "Усі моделі видалені успішно",
|
||||
"Allow Chat Controls": "Дозволити керування чатом",
|
||||
"Allow Chat Delete": "Дозволити видалення чату",
|
||||
"Allow Chat Deletion": "Дозволити видалення чату",
|
||||
|
@ -66,7 +66,7 @@
|
|||
"Allow Voice Interruption in Call": "Дозволити переривання голосу під час виклику",
|
||||
"Allowed Endpoints": "Дозволені кінцеві точки",
|
||||
"Already have an account?": "Вже є обліковий запис?",
|
||||
"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
|
||||
"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Альтернатива top_p, що спрямована на забезпечення балансу між якістю та різноманітністю. Параметр p представляє мінімальну ймовірність для врахування токена відносно ймовірності найбільш ймовірного токена. Наприклад, при p=0.05 і ймовірності найбільш ймовірного токена 0.9, логіти зі значенням менше 0.045 відфільтровуються.",
|
||||
"Always": "Завжди",
|
||||
"Amazing": "Чудово",
|
||||
"an assistant": "асистента",
|
||||
|
@ -85,27 +85,27 @@
|
|||
"applies to all users with the \"user\" role": "стосується всіх користувачів з роллю \"користувач\"",
|
||||
"April": "Квітень",
|
||||
"Archive": "Архів",
|
||||
"Archive All Chats": "Архівувати всі чати",
|
||||
"Archive All Chats": "Архівувати усі чати",
|
||||
"Archived Chats": "Архівовані чати",
|
||||
"archived-chat-export": "експорт-архівованих-чатів",
|
||||
"Are you sure you want to clear all memories? This action cannot be undone.": "",
|
||||
"Are you sure you want to clear all memories? This action cannot be undone.": "Ви впевнені, що хочете очистити усі спогади? Цю дію неможливо скасувати.",
|
||||
"Are you sure you want to delete this channel?": "Ви впевнені, що хочете видалити цей канал?",
|
||||
"Are you sure you want to delete this message?": "Ви впевнені, що хочете видалити це повідомлення?",
|
||||
"Are you sure you want to unarchive all archived chats?": "Ви впевнені, що хочете розархівувати всі архівовані чати?",
|
||||
"Are you sure you want to unarchive all archived chats?": "Ви впевнені, що хочете розархівувати усі архівовані чати?",
|
||||
"Are you sure?": "Ви впевнені?",
|
||||
"Arena Models": "Моделі Arena",
|
||||
"Artifacts": "Артефакти",
|
||||
"Ask": "",
|
||||
"Ask": "Запитати",
|
||||
"Ask a question": "Задати питання",
|
||||
"Assistant": "Асистент",
|
||||
"Attach file from knowledge": "",
|
||||
"Attach file from knowledge": "Прикріпити файл із знаннями",
|
||||
"Attention to detail": "Увага до деталей",
|
||||
"Attribute for Mail": "Атрибут для пошти",
|
||||
"Attribute for Username": "Атрибут для імені користувача",
|
||||
"Audio": "Аудіо",
|
||||
"August": "Серпень",
|
||||
"Authenticate": "Автентифікувати",
|
||||
"Authentication": "",
|
||||
"Authentication": "Аутентифікація",
|
||||
"Auto-Copy Response to Clipboard": "Автокопіювання відповіді в буфер обміну",
|
||||
"Auto-playback response": "Автоматичне відтворення відповіді",
|
||||
"Autocomplete Generation": "Генерація автозаповнення",
|
||||
|
@ -130,12 +130,12 @@
|
|||
"Bing Search V7 Endpoint": "Точка доступу Bing Search V7",
|
||||
"Bing Search V7 Subscription Key": "Ключ підписки Bing Search V7",
|
||||
"Bocha Search API Key": "Ключ API пошуку Bocha",
|
||||
"Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "",
|
||||
"Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Підсилення або штрафування конкретних токенів для обмежених відповідей. Значення зміщення будуть обмежені між -100 і 100 (включно). (За замовчуванням: відсутнє)",
|
||||
"Brave Search API Key": "Ключ API пошуку Brave",
|
||||
"By {{name}}": "Від {{name}}",
|
||||
"Bypass Embedding and Retrieval": "",
|
||||
"Bypass Embedding and Retrieval": "Минути вбудовування та пошук",
|
||||
"Bypass SSL verification for Websites": "Обхід SSL-перевірки для веб-сайтів",
|
||||
"Calendar": "",
|
||||
"Calendar": "Календар",
|
||||
"Call": "Виклик",
|
||||
"Call feature is not supported when using Web STT engine": "Функція виклику не підтримується при використанні Web STT (розпізнавання мовлення) рушія",
|
||||
"Camera": "Камера",
|
||||
|
@ -167,7 +167,7 @@
|
|||
"Ciphers": "Шифри",
|
||||
"Citation": "Цитування",
|
||||
"Clear memory": "Очистити пам'ять",
|
||||
"Clear Memory": "",
|
||||
"Clear Memory": "Очистити пам'ять",
|
||||
"click here": "натисніть тут",
|
||||
"Click here for filter guides.": "Натисніть тут для інструкцій із фільтрації",
|
||||
"Click here for help.": "Натисніть тут, щоб отримати допомогу.",
|
||||
|
@ -187,14 +187,14 @@
|
|||
"Clone of {{TITLE}}": "Клон {{TITLE}}",
|
||||
"Close": "Закрити",
|
||||
"Code execution": "Виконання коду",
|
||||
"Code Execution": "",
|
||||
"Code Execution Engine": "",
|
||||
"Code Execution Timeout": "",
|
||||
"Code Execution": "Виконання коду",
|
||||
"Code Execution Engine": "Рушій виконання коду",
|
||||
"Code Execution Timeout": "Тайм-аут виконання коду",
|
||||
"Code formatted successfully": "Код успішно відформатовано",
|
||||
"Code Interpreter": "Інтерпретатор коду",
|
||||
"Code Interpreter Engine": "Двигун інтерпретатора коду",
|
||||
"Code Interpreter Prompt Template": "Шаблон запиту інтерпретатора коду",
|
||||
"Collapse": "",
|
||||
"Collapse": "Згорнути",
|
||||
"Collection": "Колекція",
|
||||
"Color": "Колір",
|
||||
"ComfyUI": "ComfyUI",
|
||||
|
@ -213,19 +213,19 @@
|
|||
"Confirm your new password": "Підтвердіть свій новий пароль",
|
||||
"Connect to your own OpenAI compatible API endpoints.": "Підключіться до своїх власних API-ендпоінтів, сумісних з OpenAI.",
|
||||
"Connections": "З'єднання",
|
||||
"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
|
||||
"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Обмежує зусилля на міркування для моделей міркування. Діє лише для моделей міркування від конкретних постачальників, які підтримують зусилля міркування.",
|
||||
"Contact Admin for WebUI Access": "Зверніться до адміна для отримання доступу до WebUI",
|
||||
"Content": "Зміст",
|
||||
"Content Extraction Engine": "",
|
||||
"Content Extraction Engine": "Рушій вилучення контенту",
|
||||
"Context Length": "Довжина контексту",
|
||||
"Continue Response": "Продовжити відповідь",
|
||||
"Continue with {{provider}}": "Продовжити з {{provider}}",
|
||||
"Continue with Email": "Продовжити з електронною поштою",
|
||||
"Continue with LDAP": "Продовжити з LDAP",
|
||||
"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Керування розбиттям тексту повідомлення для TTS-запитів. 'Punctuation' розбиває на речення, 'paragraphs' розбиває на абзаци, а 'none' залишає повідомлення як один рядок.",
|
||||
"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
|
||||
"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Контролює повторення послідовностей токенів у згенерованому тексті. Вищий показник (напр., 1.5) сильніше штрафує за повторення, тоді як нижчий показник (напр., 1.1) буде більш м'яким. При значенні 1 ця опція вимкнена.",
|
||||
"Controls": "Керування",
|
||||
"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
|
||||
"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Контролює баланс між узгодженістю та різноманітністю результату. Нижчий показник призведе до більш зосередженого та узгодженого тексту.",
|
||||
"Copied": "Скопійовано",
|
||||
"Copied shared chat URL to clipboard!": "Скопійовано URL-адресу спільного чату в буфер обміну!",
|
||||
"Copied to clipboard": "Скопійовано в буфер обміну",
|
||||
|
@ -250,11 +250,11 @@
|
|||
"Created At": "Створено у",
|
||||
"Created by": "Створено",
|
||||
"CSV Import": "Імпорт CSV",
|
||||
"Ctrl+Enter to Send": "",
|
||||
"Ctrl+Enter to Send": "Ctrl+Enter для відправки",
|
||||
"Current Model": "Поточна модель",
|
||||
"Current Password": "Поточний пароль",
|
||||
"Custom": "Налаштувати",
|
||||
"Danger Zone": "",
|
||||
"Danger Zone": "Зона небезпеки",
|
||||
"Dark": "Темна",
|
||||
"Database": "База даних",
|
||||
"December": "Грудень",
|
||||
|
@ -269,19 +269,20 @@
|
|||
"Default permissions updated successfully": "Дозволи за замовчуванням успішно оновлено",
|
||||
"Default Prompt Suggestions": "Пропозиції промтів замовчуванням",
|
||||
"Default to 389 or 636 if TLS is enabled": "За замовчуванням використовується 389 або 636, якщо TLS увімкнено.",
|
||||
"Default to ALL": "За замовчуванням — ВСІ.",
|
||||
"Default to ALL": "За замовчуванням — УСІ.",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Роль користувача за замовчуванням",
|
||||
"Delete": "Видалити",
|
||||
"Delete a model": "Видалити модель",
|
||||
"Delete All Chats": "Видалити усі чати",
|
||||
"Delete All Models": "Видалити всі моделі",
|
||||
"Delete All Models": "Видалити усі моделі",
|
||||
"Delete chat": "Видалити чат",
|
||||
"Delete Chat": "Видалити чат",
|
||||
"Delete chat?": "Видалити чат?",
|
||||
"Delete folder?": "Видалити папку?",
|
||||
"Delete function?": "Видалити функцію?",
|
||||
"Delete Message": "Видалити повідомлення",
|
||||
"Delete message?": "",
|
||||
"Delete message?": "Видалити повідомлення?",
|
||||
"Delete prompt?": "Видалити промт?",
|
||||
"delete this link": "видалити це посилання",
|
||||
"Delete tool?": "Видалити інструмент?",
|
||||
|
@ -300,7 +301,7 @@
|
|||
"Discover a model": "Знайдіть модель",
|
||||
"Discover a prompt": "Знайдіть промт",
|
||||
"Discover a tool": "Знайдіть інструмент",
|
||||
"Discover how to use Open WebUI and seek support from the community.": "",
|
||||
"Discover how to use Open WebUI and seek support from the community.": "Дізнайтесь, як використовувати Open WebUI, та звертайтесь за підтримкою до спільноти.",
|
||||
"Discover wonders": "Відкривайте чудеса",
|
||||
"Discover, download, and explore custom functions": "Знайдіть, завантажте та досліджуйте налаштовані функції",
|
||||
"Discover, download, and explore custom prompts": "Знайдіть, завантажте та досліджуйте налаштовані промти",
|
||||
|
@ -315,8 +316,8 @@
|
|||
"Do not install functions from sources you do not fully trust.": "Не встановлюйте функції з джерел, яким ви не повністю довіряєте.",
|
||||
"Do not install tools from sources you do not fully trust.": "Не встановлюйте інструменти з джерел, яким ви не повністю довіряєте.",
|
||||
"Document": "Документ",
|
||||
"Document Intelligence": "",
|
||||
"Document Intelligence endpoint and key required.": "",
|
||||
"Document Intelligence": "Інтелект документа",
|
||||
"Document Intelligence endpoint and key required.": "Потрібні кінцева точка та ключ для Інтелекту документа.",
|
||||
"Documentation": "Документація",
|
||||
"Documents": "Документи",
|
||||
"does not make any external connections, and your data stays securely on your locally hosted server.": "не встановлює жодних зовнішніх з'єднань, і ваші дані залишаються в безпеці на вашому локальному сервері.",
|
||||
|
@ -327,14 +328,14 @@
|
|||
"Don't like the style": "Не подобається стиль",
|
||||
"Done": "Готово",
|
||||
"Download": "Завантажити",
|
||||
"Download as SVG": "",
|
||||
"Download as SVG": "Завантажити як SVG",
|
||||
"Download canceled": "Завантаження скасовано",
|
||||
"Download Database": "Завантажити базу даних",
|
||||
"Drag and drop a file to upload or select a file to view": "Перетягніть файл для завантаження або виберіть файл для перегляду",
|
||||
"Draw": "Малювати",
|
||||
"Drop any files here to add to the conversation": "Перетягніть сюди файли, щоб додати до розмови",
|
||||
"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "напр., '30s','10m'. Дійсні одиниці часу: 'с', 'хв', 'г'.",
|
||||
"e.g. 60": "",
|
||||
"e.g. 60": "напр. 60",
|
||||
"e.g. A filter to remove profanity from text": "напр., фільтр для видалення нецензурної лексики з тексту",
|
||||
"e.g. My Filter": "напр., Мій фільтр",
|
||||
"e.g. My Tools": "напр., Мої інструменти",
|
||||
|
@ -352,20 +353,20 @@
|
|||
"ElevenLabs": "ElevenLabs",
|
||||
"Email": "Ел. пошта",
|
||||
"Embark on adventures": "Вирушайте в пригоди",
|
||||
"Embedding": "",
|
||||
"Embedding": "Вбудовування",
|
||||
"Embedding Batch Size": "Розмір пакету під час вбудовування",
|
||||
"Embedding Model": "Модель вбудовування",
|
||||
"Embedding Model Engine": "Рушій моделі вбудовування ",
|
||||
"Embedding model set to \"{{embedding_model}}\"": "Встановлена модель вбудовування \"{{embedding_model}}\"",
|
||||
"Enable API Key": "Увімкнути ключ API",
|
||||
"Enable autocomplete generation for chat messages": "Увімкнути генерацію автозаповнення для повідомлень чату",
|
||||
"Enable Code Execution": "",
|
||||
"Enable Code Execution": "Увімкнути виконання коду",
|
||||
"Enable Code Interpreter": "Увімкнути інтерпретатор коду",
|
||||
"Enable Community Sharing": "Увімкнути спільний доступ",
|
||||
"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Увімкнути блокування пам'яті (mlock), щоб запобігти виведенню даних моделі з оперативної пам'яті. Цей параметр блокує робочий набір сторінок моделі в оперативній пам'яті, гарантуючи, що вони не будуть виведені на диск. Це може допомогти підтримувати продуктивність, уникати помилок сторінок та забезпечувати швидкий доступ до даних.",
|
||||
"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Увімкнути відображення пам'яті (mmap) для завантаження даних моделі. Цей параметр дозволяє системі використовувати дискове сховище як розширення оперативної пам'яті, трактуючи файли на диску, як ніби вони знаходяться в RAM. Це може покращити продуктивність моделі, дозволяючи швидший доступ до даних. Однак, він може не працювати коректно на всіх системах і може споживати значну кількість дискового простору.",
|
||||
"Enable Message Rating": "Увімкнути оцінку повідомлень",
|
||||
"Enable Mirostat sampling for controlling perplexity.": "",
|
||||
"Enable Mirostat sampling for controlling perplexity.": "Увімкнути вибірку Mirostat для контролю перплексії.",
|
||||
"Enable New Sign Ups": "Дозволити нові реєстрації",
|
||||
"Enabled": "Увімкнено",
|
||||
"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Переконайтеся, що ваш CSV-файл містить 4 колонки в такому порядку: Ім'я, Email, Пароль, Роль.",
|
||||
|
@ -382,10 +383,10 @@
|
|||
"Enter CFG Scale (e.g. 7.0)": "Введіть масштаб CFG (напр., 7.0)",
|
||||
"Enter Chunk Overlap": "Введіть перекриття фрагменту",
|
||||
"Enter Chunk Size": "Введіть розмір фрагменту",
|
||||
"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
|
||||
"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Введіть пари \"токен:значення_зміщення\", розділені комами (напр.: 5432:100, 413:-100)",
|
||||
"Enter description": "Введіть опис",
|
||||
"Enter Document Intelligence Endpoint": "",
|
||||
"Enter Document Intelligence Key": "",
|
||||
"Enter Document Intelligence Endpoint": "Введіть кінцеву точку Інтелекту документа",
|
||||
"Enter Document Intelligence Key": "Введіть ключ Інтелекту документа",
|
||||
"Enter domains separated by commas (e.g., example.com,site.org)": "Введіть домени, розділені комами (наприклад, example.com, site.org)",
|
||||
"Enter Exa API Key": "Введіть ключ API Exa",
|
||||
"Enter Github Raw URL": "Введіть Raw URL-адресу Github",
|
||||
|
@ -397,13 +398,13 @@
|
|||
"Enter Jupyter Token": "Введіть токен Jupyter",
|
||||
"Enter Jupyter URL": "Введіть URL Jupyter",
|
||||
"Enter Kagi Search API Key": "Введіть ключ API Kagi Search",
|
||||
"Enter Key Behavior": "",
|
||||
"Enter Key Behavior": "Введіть поведінку клавіші",
|
||||
"Enter language codes": "Введіть мовні коди",
|
||||
"Enter Model ID": "Введіть ID моделі",
|
||||
"Enter model tag (e.g. {{modelTag}})": "Введіть тег моделі (напр., {{modelTag}})",
|
||||
"Enter Mojeek Search API Key": "Введіть API ключ для пошуку Mojeek",
|
||||
"Enter Number of Steps (e.g. 50)": "Введіть кількість кроків (напр., 50)",
|
||||
"Enter Perplexity API Key": "",
|
||||
"Enter Perplexity API Key": "Введіть ключ API для Perplexity",
|
||||
"Enter proxy URL (e.g. https://user:password@host:port)": "Введіть URL проксі (напр., https://user:password@host:port)",
|
||||
"Enter reasoning effort": "Введіть зусилля на міркування",
|
||||
"Enter Sampler (e.g. Euler a)": "Введіть семплер (напр., Euler a)",
|
||||
|
@ -413,8 +414,8 @@
|
|||
"Enter SearchApi Engine": "Введіть SearchApi рушія",
|
||||
"Enter Searxng Query URL": "Введіть URL-адресу запиту Searxng",
|
||||
"Enter Seed": "Введіть насіння",
|
||||
"Enter SerpApi API Key": "",
|
||||
"Enter SerpApi Engine": "",
|
||||
"Enter SerpApi API Key": "Введіть ключ API для SerpApi",
|
||||
"Enter SerpApi Engine": "Введіть рушій SerpApi",
|
||||
"Enter Serper API Key": "Введіть ключ API Serper",
|
||||
"Enter Serply API Key": "Введіть ключ API Serply",
|
||||
"Enter Serpstack API Key": "Введіть ключ API Serpstack",
|
||||
|
@ -425,9 +426,9 @@
|
|||
"Enter system prompt": "Введіть системний промт",
|
||||
"Enter Tavily API Key": "Введіть ключ API Tavily",
|
||||
"Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Введіть публічний URL вашого WebUI. Цей URL буде використовуватися для генерування посилань у сповіщеннях.",
|
||||
"Enter Tika Server URL": "Введіть URL-адресу сервера Tika ",
|
||||
"Enter timeout in seconds": "",
|
||||
"Enter to Send": "",
|
||||
"Enter Tika Server URL": "Введіть URL-адресу сервера Tika",
|
||||
"Enter timeout in seconds": "Введіть тайм-аут у секундах",
|
||||
"Enter to Send": "Введіть для відправки",
|
||||
"Enter Top K": "Введіть Top K",
|
||||
"Enter URL (e.g. http://127.0.0.1:7860/)": "Введіть URL-адресу (напр., http://127.0.0.1:7860/)",
|
||||
"Enter URL (e.g. http://localhost:11434)": "Введіть URL-адресу (напр., http://localhost:11434)",
|
||||
|
@ -447,17 +448,17 @@
|
|||
"Evaluations": "Оцінювання",
|
||||
"Exa API Key": "Exa API ключ",
|
||||
"Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Приклад: (&(objectClass=inetOrgPerson)(uid=%s))",
|
||||
"Example: ALL": "Приклад: ВСІ",
|
||||
"Example: ALL": "Приклад: УСІ",
|
||||
"Example: mail": "Приклад: пошта",
|
||||
"Example: ou=users,dc=foo,dc=example": "Приклад: ou=users,dc=foo,dc=example",
|
||||
"Example: sAMAccountName or uid or userPrincipalName": "Приклад: sAMAccountName або uid або userPrincipalName",
|
||||
"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "",
|
||||
"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Перевищено кількість місць у вашій ліцензії. Будь ласка, зверніться до підтримки для збільшення кількості місць.",
|
||||
"Exclude": "Виключити",
|
||||
"Execute code for analysis": "Виконати код для аналізу",
|
||||
"Expand": "",
|
||||
"Expand": "Розгорнути",
|
||||
"Experimental": "Експериментальне",
|
||||
"Explain": "",
|
||||
"Explain this section to me in more detail": "",
|
||||
"Explain": "Пояснити",
|
||||
"Explain this section to me in more detail": "Поясніть цю секцію детальніше",
|
||||
"Explore the cosmos": "Досліджуйте космос",
|
||||
"Export": "Експорт",
|
||||
"Export All Archived Chats": "Експорт всіх архівованих чатів",
|
||||
|
@ -479,7 +480,7 @@
|
|||
"Failed to save models configuration": "Не вдалося зберегти конфігурацію моделей",
|
||||
"Failed to update settings": "Не вдалося оновити налаштування",
|
||||
"Failed to upload file.": "Не вдалося завантажити файл.",
|
||||
"Features": "",
|
||||
"Features": "Особливості",
|
||||
"Features Permissions": "Дозволи функцій",
|
||||
"February": "Лютий",
|
||||
"Feedback History": "Історія відгуків",
|
||||
|
@ -509,7 +510,7 @@
|
|||
"Form": "Форма",
|
||||
"Format your variables using brackets like this:": "Форматуйте свої змінні, використовуючи фігурні дужки таким чином:",
|
||||
"Frequency Penalty": "Штраф за частоту",
|
||||
"Full Context Mode": "",
|
||||
"Full Context Mode": "Режим повного контексту",
|
||||
"Function": "Функція",
|
||||
"Function Calling": "Виклик функцій",
|
||||
"Function created successfully": "Функцію успішно створено",
|
||||
|
@ -524,13 +525,13 @@
|
|||
"Functions allow arbitrary code execution": "Функції дозволяють виконання довільного коду",
|
||||
"Functions allow arbitrary code execution.": "Функції дозволяють виконання довільного коду.",
|
||||
"Functions imported successfully": "Функції успішно імпортовано",
|
||||
"Gemini": "",
|
||||
"Gemini API Config": "",
|
||||
"Gemini API Key is required.": "",
|
||||
"Gemini": "Gemini",
|
||||
"Gemini API Config": "Конфігурація Gemini API",
|
||||
"Gemini API Key is required.": "Потрібен ключ API Gemini.",
|
||||
"General": "Загальні",
|
||||
"Generate an image": "Згенерувати зображення",
|
||||
"Generate Image": "Створити зображення",
|
||||
"Generate prompt pair": "",
|
||||
"Generate prompt pair": "Згенерувати пару промтів",
|
||||
"Generating search query": "Сформувати пошуковий запит",
|
||||
"Get started": "Почати",
|
||||
"Get started with {{WEBUI_NAME}}": "Почати з {{WEBUI_NAME}}",
|
||||
|
@ -553,7 +554,7 @@
|
|||
"Hex Color": "Шістнадцятковий колір",
|
||||
"Hex Color - Leave empty for default color": "Шістнадцятковий колір — залиште порожнім для кольору за замовчуванням",
|
||||
"Hide": "Приховати",
|
||||
"Home": "",
|
||||
"Home": "Головна",
|
||||
"Host": "Хост",
|
||||
"How can I help you today?": "Чим я можу допомогти вам сьогодні?",
|
||||
"How would you rate this response?": "Як би ви оцінили цю відповідь?",
|
||||
|
@ -581,12 +582,13 @@
|
|||
"Include": "Включити",
|
||||
"Include `--api-auth` flag when running stable-diffusion-webui": "Включіть прапорець `--api-auth` під час запуску stable-diffusion-webui",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Включіть прапор `--api` при запуску stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Впливає на те, як швидко алгоритм реагує на відгуки згенерованого тексту. Нижча швидкість навчання призведе до повільніших коригувань, тоді як вища швидкість навчання зробить алгоритм більш чутливим.",
|
||||
"Info": "Інфо",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Команди вводу",
|
||||
"Install from Github URL": "Встановіть з URL-адреси Github",
|
||||
"Instant Auto-Send After Voice Transcription": "Миттєва автоматична відправка після транскрипції голосу",
|
||||
"Integration": "",
|
||||
"Integration": "Інтеграція",
|
||||
"Interface": "Інтерфейс",
|
||||
"Invalid file format.": "Неправильний формат файлу.",
|
||||
"Invalid Tag": "Недійсний тег",
|
||||
|
@ -624,22 +626,22 @@
|
|||
"LDAP server updated": "Сервер LDAP оновлено",
|
||||
"Leaderboard": "Таблиця лідерів",
|
||||
"Leave empty for unlimited": "Залиште порожнім для необмеженого розміру",
|
||||
"Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Залиште порожнім, щоб включити всі моделі з кінцевої точки \"{{URL}}/api/tags\"",
|
||||
"Leave empty to include all models from \"{{URL}}/models\" endpoint": "Залиште порожнім, щоб включити всі моделі з кінцевої точки \"{{URL}}/models\"",
|
||||
"Leave empty to include all models or select specific models": "Залиште порожнім, щоб включити всі моделі, або виберіть конкретні моделі.",
|
||||
"Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Залиште порожнім, щоб включити усі моделі з кінцевої точки \"{{URL}}/api/tags\"",
|
||||
"Leave empty to include all models from \"{{URL}}/models\" endpoint": "Залиште порожнім, щоб включити усі моделі з кінцевої точки \"{{URL}}/models\"",
|
||||
"Leave empty to include all models or select specific models": "Залиште порожнім, щоб включити усі моделі, або виберіть конкретні моделі.",
|
||||
"Leave empty to use the default prompt, or enter a custom prompt": "Залиште порожнім для використання стандартного запиту, або введіть власний запит",
|
||||
"Leave model field empty to use the default model.": "Залиште поле моделі порожнім, щоб використовувати модель за замовчуванням.",
|
||||
"License": "",
|
||||
"License": "Ліцензія",
|
||||
"Light": "Світла",
|
||||
"Listening...": "Слухаю...",
|
||||
"Llama.cpp": "Llama.cpp",
|
||||
"LLMs can make mistakes. Verify important information.": "LLMs можуть помилятися. Перевірте важливу інформацію.",
|
||||
"Loader": "",
|
||||
"Loader": "Завантажувач",
|
||||
"Loading Kokoro.js...": "Завантаження Kokoro.js...",
|
||||
"Local": "Локальний",
|
||||
"Local Models": "Локальні моделі",
|
||||
"Location access not allowed": "",
|
||||
"Logit Bias": "",
|
||||
"Location access not allowed": "Доступ до місцезнаходження не дозволено",
|
||||
"Logit Bias": "Логітне зміщення",
|
||||
"Lost": "Втрачене",
|
||||
"LTR": "LTR",
|
||||
"Made by Open WebUI Community": "Зроблено спільнотою OpenWebUI",
|
||||
|
@ -713,7 +715,7 @@
|
|||
"No HTML, CSS, or JavaScript content found.": "HTML, CSS або JavaScript контент не знайдено.",
|
||||
"No inference engine with management support found": "Не знайдено двигуна висновків з підтримкою керування",
|
||||
"No knowledge found": "Знання не знайдено.",
|
||||
"No memories to clear": "",
|
||||
"No memories to clear": "Немає спогадів для очищення",
|
||||
"No model IDs": "Немає ID моделей",
|
||||
"No models found": "Моделей не знайдено",
|
||||
"No models selected": "Моделі не вибрано",
|
||||
|
@ -743,7 +745,7 @@
|
|||
"Ollama API settings updated": "Налаштування Ollama API оновлено",
|
||||
"Ollama Version": "Версія Ollama",
|
||||
"On": "Увімк",
|
||||
"OneDrive": "",
|
||||
"OneDrive": "OneDrive",
|
||||
"Only alphanumeric characters and hyphens are allowed": "Дозволені тільки алфавітно-цифрові символи та дефіси",
|
||||
"Only alphanumeric characters and hyphens are allowed in the command string.": "У рядку команди дозволено використовувати лише алфавітно-цифрові символи та дефіси.",
|
||||
"Only collections can be edited, create a new knowledge base to edit/add documents.": "Редагувати можна лише колекції, створіть нову базу знань, щоб редагувати або додавати документи.",
|
||||
|
@ -780,7 +782,7 @@
|
|||
"Permission denied when accessing microphone": "Відмовлено у доступі до мікрофона",
|
||||
"Permission denied when accessing microphone: {{error}}": "Доступ до мікрофона заборонено: {{error}}",
|
||||
"Permissions": "Дозволи",
|
||||
"Perplexity API Key": "",
|
||||
"Perplexity API Key": "Ключ API для Perplexity",
|
||||
"Personalization": "Персоналізація",
|
||||
"Pin": "Зачепити",
|
||||
"Pinned": "Зачеплено",
|
||||
|
@ -795,7 +797,7 @@
|
|||
"Please carefully review the following warnings:": "Будь ласка, уважно ознайомтеся з наступними попередженнями:",
|
||||
"Please do not close the settings page while loading the model.": "Будь ласка, не закривайте сторінку налаштувань під час завантаження моделі.",
|
||||
"Please enter a prompt": "Будь ласка, введіть підказку",
|
||||
"Please fill in all fields.": "Будь ласка, заповніть всі поля.",
|
||||
"Please fill in all fields.": "Будь ласка, заповніть усі поля.",
|
||||
"Please select a model first.": "Будь ласка, спочатку виберіть модель.",
|
||||
"Please select a model.": "Будь ласка, виберіть модель.",
|
||||
"Please select a reason": "Будь ласка, виберіть причину",
|
||||
|
@ -803,9 +805,10 @@
|
|||
"Positive attitude": "Позитивне ставлення",
|
||||
"Prefix ID": "ID префікса",
|
||||
"Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "ID префікса використовується для уникнення конфліктів з іншими підключеннями шляхом додавання префікса до ID моделей — залиште порожнім, щоб вимкнути",
|
||||
"Presence Penalty": "",
|
||||
"Presence Penalty": "Штраф за присутність",
|
||||
"Previous 30 days": "Попередні 30 днів",
|
||||
"Previous 7 days": "Попередні 7 днів",
|
||||
"Private": "",
|
||||
"Profile Image": "Зображення профілю",
|
||||
"Prompt": "Підказка",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Підказка (напр., розкажіть мені цікавий факт про Римську імперію)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "Підказку успішно оновлено",
|
||||
"Prompts": "Промти",
|
||||
"Prompts Access": "Доступ до підказок",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Завантажити \"{{searchValue}}\" з Ollama.com",
|
||||
"Pull a model from Ollama.com": "Завантажити модель з Ollama.com",
|
||||
"Query Generation Prompt": "Підказка для генерації запиту",
|
||||
|
@ -826,7 +830,7 @@
|
|||
"Reasoning Effort": "Зусилля на міркування",
|
||||
"Record voice": "Записати голос",
|
||||
"Redirecting you to Open WebUI Community": "Перенаправляємо вас до спільноти OpenWebUI",
|
||||
"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
|
||||
"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Зменшує ймовірність генерування нісенітниць. Вищий показник (напр., 100) забезпечить більше різноманітних відповідей, тоді як нижчий показник (напр., 10) буде більш обережним.",
|
||||
"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Називайте себе \"Користувач\" (напр., \"Користувач вивчає іспанську мову\")",
|
||||
"References from": "Посилання з",
|
||||
"Refused when it shouldn't have": "Відмовив, коли не мав би",
|
||||
|
@ -838,21 +842,21 @@
|
|||
"Rename": "Переназвати",
|
||||
"Reorder Models": "Переставити моделі",
|
||||
"Repeat Last N": "Повторити останні N",
|
||||
"Repeat Penalty (Ollama)": "",
|
||||
"Repeat Penalty (Ollama)": "Штраф за повторення (Ollama)",
|
||||
"Reply in Thread": "Відповісти в потоці",
|
||||
"Request Mode": "Режим запиту",
|
||||
"Reranking Model": "Модель переранжування",
|
||||
"Reranking model disabled": "Модель переранжування вимкнена",
|
||||
"Reranking model set to \"{{reranking_model}}\"": "Модель переранжування встановлено на \"{{reranking_model}}\"",
|
||||
"Reset": "Скидання",
|
||||
"Reset All Models": "Скинути всі моделі",
|
||||
"Reset All Models": "Скинути усі моделі",
|
||||
"Reset Upload Directory": "Скинути каталог завантажень",
|
||||
"Reset Vector Storage/Knowledge": "Скинути векторне сховище/Знання",
|
||||
"Reset view": "Скинути вигляд",
|
||||
"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Сповіщення про відповіді не можуть бути активовані, оскільки вам було відмовлено в доступі до веб-сайту. Будь ласка, відвідайте налаштування вашого браузера, щоб надати необхідний доступ.",
|
||||
"Response splitting": "Розбиття відповіді",
|
||||
"Result": "Результат",
|
||||
"Retrieval": "",
|
||||
"Retrieval": "Пошук",
|
||||
"Retrieval Query Generation": "Генерація запиту для отримання даних",
|
||||
"Rich Text Input for Chat": "Ввід тексту з форматуванням для чату",
|
||||
"RK": "RK",
|
||||
|
@ -914,8 +918,8 @@
|
|||
"Send message": "Надіслати повідомлення",
|
||||
"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Відправляє `stream_options: { include_usage: true }` у запиті.\nПідтримувані постачальники повернуть інформацію про використання токену у відповіді, якщо вона встановлена.",
|
||||
"September": "Вересень",
|
||||
"SerpApi API Key": "",
|
||||
"SerpApi Engine": "",
|
||||
"SerpApi API Key": "Ключ API SerpApi",
|
||||
"SerpApi Engine": "Рушій SerpApi",
|
||||
"Serper API Key": "Ключ API Serper",
|
||||
"Serply API Key": "Ключ API Serply",
|
||||
"Serpstack API Key": "Ключ API Serpstack",
|
||||
|
@ -935,11 +939,11 @@
|
|||
"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Встановити кількість робочих потоків, що використовуються для обробки інформації. Ця опція керує кількістю потоків, що використовуються для обробки надходження запитів одночасно. Збільшення цього значення може підвищити продуктивність при великій одночасності робіт, але також може споживати більше ресурсів CPU.",
|
||||
"Set Voice": "Встановити голос",
|
||||
"Set whisper model": "Встановити модель whisper",
|
||||
"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
|
||||
"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
|
||||
"Sets how far back for the model to look back to prevent repetition.": "",
|
||||
"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
|
||||
"Sets the size of the context window used to generate the next token.": "",
|
||||
"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Встановлює фіксоване зміщення проти токенів, які з'явилися хоча б один раз. Вищий показник (напр., 1.5) сильніше штрафує за повторення, тоді як нижчий показник (напр., 0.9) буде більш м'яким. При значенні 0, ця опція вимкнена.",
|
||||
"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Встановлює масштабоване зміщення проти токенів для штрафування повторів, залежно від того, скільки разів вони з'являлися. Вищий показник (напр., 1.5) сильніше штрафує за повторення, тоді як нижчий показник (напр., 0.9) буде більш м'яким. При значенні 0, ця опція вимкнена.",
|
||||
"Sets how far back for the model to look back to prevent repetition.": "Встановлює, на скільки кроків назад модель повинна звертати увагу, щоб запобігти повторенням.",
|
||||
"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "Встановлює початкове значення випадкового числа, яке використовується для генерації. Встановлення конкретного числа забезпечить однаковий текст для того ж запиту.",
|
||||
"Sets the size of the context window used to generate the next token.": "Встановлює розмір вікна контексту, яке використовується для генерації наступного токена.",
|
||||
"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Встановлює послідовності зупинки, які будуть використовуватися. Коли зустрічається така послідовність, LLM припиняє генерацію тексту і повертає результат. Можна встановити кілька послідовностей зупинки, вказавши кілька окремих параметрів зупинки у файлі моделі.",
|
||||
"Settings": "Налаштування",
|
||||
"Settings saved successfully!": "Налаштування успішно збережено!",
|
||||
|
@ -947,7 +951,7 @@
|
|||
"Share Chat": "Поділитися чатом",
|
||||
"Share to Open WebUI Community": "Поділитися зі спільнотою OpenWebUI",
|
||||
"Show": "Показати",
|
||||
"Show \"What's New\" modal on login": "Показати модальне вікно \"Що нового\" під час входу.",
|
||||
"Show \"What's New\" modal on login": "Показати модальне вікно \"Що нового\" під час входу",
|
||||
"Show Admin Details in Account Pending Overlay": "Відобразити дані адміна у вікні очікування облікового запису",
|
||||
"Show shortcuts": "Показати клавіатурні скорочення",
|
||||
"Show your support!": "Підтримайте нас!",
|
||||
|
@ -981,10 +985,10 @@
|
|||
"System Prompt": "Системний промт",
|
||||
"Tags Generation": "Генерація тегів",
|
||||
"Tags Generation Prompt": "Підказка для генерації тегів",
|
||||
"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
|
||||
"Talk to model": "",
|
||||
"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Вибірка без хвоста використовується для зменшення впливу менш ймовірних токенів на результат. Вищий показник (напр., 2.0) зменшить вплив сильніше, тоді як значення 1.0 вимикає цю опцію.",
|
||||
"Talk to model": "Спілкуватися з моделлю",
|
||||
"Tap to interrupt": "Натисніть, щоб перервати",
|
||||
"Tasks": "",
|
||||
"Tasks": "Завдання",
|
||||
"Tavily API Key": "Ключ API Tavily",
|
||||
"Tell us more:": "Розкажи нам більше:",
|
||||
"Temperature": "Температура",
|
||||
|
@ -996,7 +1000,7 @@
|
|||
"Thanks for your feedback!": "Дякуємо за ваш відгук!",
|
||||
"The Application Account DN you bind with for search": "DN облікового запису застосунку, з яким ви здійснюєте прив'язку для пошуку",
|
||||
"The base to search for users": "База для пошуку користувачів",
|
||||
"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
|
||||
"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Розмір пакету визначає, скільки текстових запитів обробляється одночасно. Більший розмір пакету може підвищити продуктивність і швидкість моделі, але також вимагає більше пам'яті.",
|
||||
"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Розробники цього плагіна - пристрасні волонтери зі спільноти. Якщо ви вважаєте цей плагін корисним, будь ласка, зробіть свій внесок у його розвиток.",
|
||||
"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Таблиця лідерів оцінки базується на системі рейтингу Ело і оновлюється в реальному часі.",
|
||||
"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-атрибут, який відповідає за пошту, яку користувачі використовують для входу.",
|
||||
|
@ -1005,21 +1009,22 @@
|
|||
"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максимальний розмір файлу в МБ. Якщо розмір файлу перевищує цей ліміт, файл не буде завантажено.",
|
||||
"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максимальна кількість файлів, які можна використати одночасно в чаті. Якщо кількість файлів перевищує цей ліміт, файли не будуть завантажені.",
|
||||
"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оцінка повинна бути в діапазоні від 0.0 (0%) до 1.0 (100%).",
|
||||
"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
|
||||
"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "Температура моделі. Збільшення температури зробить відповіді моделі більш креативними.",
|
||||
"Theme": "Тема",
|
||||
"Thinking...": "Думаю...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Цю дію не можна скасувати. Ви бажаєте продовжити?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Ця опція контролює, скільки токенів зберігається при оновленні контексту. Наприклад, якщо встановити значення 2, останні 2 токени контексту розмови будуть збережені. Збереження контексту допомагає підтримувати послідовність розмови, але може зменшити здатність реагувати на нові теми.",
|
||||
"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Ця опція встановлює максимальну кількість токенів, які модель може згенерувати у своїй відповіді. Збільшення цього ліміту дозволяє моделі надавати довші відповіді, але також може підвищити ймовірність генерації непотрібного або нерелевантного контенту.",
|
||||
"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Цей варіант видалить усі існуючі файли в колекції та замінить їх новими завантаженими файлами.",
|
||||
"This response was generated by \"{{model}}\"": "Цю відповідь згенеровано за допомогою \"{{model}}\"",
|
||||
"This will delete": "Це призведе до видалення",
|
||||
"This will delete <strong>{{NAME}}</strong> and <strong>all its contents</strong>.": "Це видалить <strong>{{NAME}}</strong> та <strong>всі його вмісти</strong>.",
|
||||
"This will delete <strong>{{NAME}}</strong> and <strong>all its contents</strong>.": "Це видалить <strong>{{NAME}}</strong> та <strong>усі його вмісти</strong>.",
|
||||
"This will delete all models including custom models": "Це видалить усі моделі, включаючи користувацькі моделі",
|
||||
"This will delete all models including custom models and cannot be undone.": "Це видалить усі моделі, включаючи користувацькі моделі, і не може бути скасовано.",
|
||||
"This will reset the knowledge base and sync all files. Do you wish to continue?": "Це скине базу знань і синхронізує всі файли. Ви бажаєте продовжити?",
|
||||
"This will reset the knowledge base and sync all files. Do you wish to continue?": "Це скине базу знань і синхронізує усі файли. Ви бажаєте продовжити?",
|
||||
"Thorough explanation": "Детальне пояснення",
|
||||
"Thought for {{DURATION}}": "Думка для {{DURATION}}",
|
||||
"Thought for {{DURATION}} seconds": "Думав протягом {{DURATION}} секунд.",
|
||||
|
@ -1031,7 +1036,7 @@
|
|||
"Title (e.g. Tell me a fun fact)": "Заголовок (напр., Розкажіть мені цікавий факт)",
|
||||
"Title Auto-Generation": "Автогенерація заголовків",
|
||||
"Title cannot be an empty string.": "Заголовок не може бути порожнім рядком.",
|
||||
"Title Generation": "",
|
||||
"Title Generation": "Генерація заголовка",
|
||||
"Title Generation Prompt": "Промт для генерування заголовків",
|
||||
"TLS": "TLS",
|
||||
"To access the available model names for downloading,": "Щоб отримати доступ до назв доступних для завантаження моделей,",
|
||||
|
@ -1067,7 +1072,7 @@
|
|||
"Top P": "Top P",
|
||||
"Transformers": "Трансформери",
|
||||
"Trouble accessing Ollama?": "Проблеми з доступом до Ollama?",
|
||||
"Trust Proxy Environment": "",
|
||||
"Trust Proxy Environment": "Довіряти середовищу проксі",
|
||||
"TTS Model": "Модель TTS",
|
||||
"TTS Settings": "Налаштування TTS",
|
||||
"TTS Voice": "Голос TTS",
|
||||
|
@ -1076,7 +1081,7 @@
|
|||
"Uh-oh! There was an issue with the response.": "Ой-ой! Сталася проблема з відповіддю.",
|
||||
"UI": "Користувацький інтерфейс",
|
||||
"Unarchive All": "Розархівувати все",
|
||||
"Unarchive All Archived Chats": "Розархівувати всі архівовані чати",
|
||||
"Unarchive All Archived Chats": "Розархівувати усі архівовані чати",
|
||||
"Unarchive Chat": "Розархівувати чат",
|
||||
"Unlock mysteries": "Розкрийте таємниці",
|
||||
"Unpin": "Відчепити",
|
||||
|
@ -1089,7 +1094,7 @@
|
|||
"Updated": "Оновлено",
|
||||
"Updated at": "Оновлено на",
|
||||
"Updated At": "Оновлено на",
|
||||
"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "",
|
||||
"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Оновіть до ліцензованого плану для розширених можливостей, включаючи кастомізацію теми та брендування, а також спеціалізовану підтримку.",
|
||||
"Upload": "Завантажити",
|
||||
"Upload a GGUF model": "Завантажити GGUF модель",
|
||||
"Upload directory": "Завантажити каталог",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Клапани успішно оновлено",
|
||||
"variable": "змінна",
|
||||
"variable to have them replaced with clipboard content.": "змінна, щоб замінити їх вмістом буфера обміну.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Версія",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "Версія {{selectedVersion}} з {{totalVersions}}",
|
||||
"View Replies": "Переглянути відповіді",
|
||||
|
@ -1127,11 +1133,11 @@
|
|||
"Warning": "Увага!",
|
||||
"Warning:": "Увага:",
|
||||
"Warning: Enabling this will allow users to upload arbitrary code on the server.": "Попередження: Увімкнення цього дозволить користувачам завантажувати довільний код на сервер.",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Попередження: Якщо ви оновлюєте або змінюєте модель вбудовування, вам потрібно буде повторно імпортувати всі документи.",
|
||||
"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Попередження: Якщо ви оновлюєте або змінюєте модель вбудовування, вам потрібно буде повторно імпортувати усі документи.",
|
||||
"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Попередження: Виконання коду в Jupyter дозволяє виконувати任 будь-який код, що становить серйозні ризики для безпеки — дійте з крайньою обережністю.",
|
||||
"Web": "Веб",
|
||||
"Web API": "Веб-API",
|
||||
"Web Search": "Веб-пошук",
|
||||
"Web Search": "Веб-Пошук",
|
||||
"Web Search Engine": "Веб-пошукова система",
|
||||
"Web Search in Chat": "Пошук в інтернеті в чаті",
|
||||
"Web Search Query Generation": "Генерація запиту для пошуку в мережі",
|
||||
|
@ -1149,7 +1155,7 @@
|
|||
"Why?": "Чому?",
|
||||
"Widescreen Mode": "Широкоекранний режим",
|
||||
"Won": "Переможець",
|
||||
"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
|
||||
"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Працює разом із top-k. Вищий показник (напр., 0.95) призведе до більш різноманітного тексту, тоді як нижчий показник (напр., 0.5) згенерує більш сфокусований та консервативний текст.",
|
||||
"Workspace": "Робочий простір",
|
||||
"Workspace Permissions": "Дозволи робочого простору.",
|
||||
"Write": "Писати",
|
||||
|
@ -1159,7 +1165,7 @@
|
|||
"Write your model template content here": "Напишіть вміст шаблону моделі тут",
|
||||
"Yesterday": "Вчора",
|
||||
"You": "Ви",
|
||||
"You are currently using a trial license. Please contact support to upgrade your license.": "",
|
||||
"You are currently using a trial license. Please contact support to upgrade your license.": "Ви наразі використовуєте пробну ліцензію. Будь ласка, зверніться до підтримки для оновлення вашої ліцензії.",
|
||||
"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Ви можете спілкуватися лише з максимальною кількістю {{maxCount}} файлів одночасно.",
|
||||
"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Ви можете налаштувати ваші взаємодії з мовними моделями, додавши спогади через кнопку 'Керувати' внизу, що зробить їх більш корисними та персоналізованими для вас.",
|
||||
"You cannot upload an empty file.": "Ви не можете завантажити порожній файл.",
|
||||
|
@ -1173,6 +1179,6 @@
|
|||
"Your account status is currently pending activation.": "Статус вашого облікового запису наразі очікує на активацію.",
|
||||
"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Весь ваш внесок піде безпосередньо розробнику плагіна; Open WebUI не бере жодних відсотків. Однак, обрана платформа фінансування може мати свої власні збори.",
|
||||
"Youtube": "Youtube",
|
||||
"Youtube Language": "",
|
||||
"Youtube Proxy URL": ""
|
||||
"Youtube Language": "Мова YouTube",
|
||||
"Youtube Proxy URL": "URL проксі-сервера YouTube"
|
||||
}
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "ڈیفالٹ پرامپٹ تجاویز",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "ڈیفالٹ صارف کا کردار",
|
||||
"Delete": "حذف کریں",
|
||||
"Delete a model": "ایک ماڈل حذف کریں",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "اسٹیبل-ڈیفیوژن-ویب یو آئی چلانے کے دوران `--api` فلیگ شامل کریں",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "معلومات",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "کمانڈز داخل کریں",
|
||||
"Install from Github URL": "گِٹ حب یو آر ایل سے انسٹال کریں",
|
||||
"Instant Auto-Send After Voice Transcription": "آواز کی نقل کے بعد فوری خودکار بھیجنا",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "پچھلے 30 دن",
|
||||
"Previous 7 days": "پچھلے 7 دن",
|
||||
"Private": "",
|
||||
"Profile Image": "پروفائل تصویر",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "سوال کریں (مثلاً: مجھے رومن سلطنت کے بارے میں کوئی دلچسپ حقیقت بتائیں)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "پرومپٹس",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com سے \"{{searchValue}}\" کو کھینچیں",
|
||||
"Pull a model from Ollama.com": "Ollama.com سے ماڈل حاصل کریں",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "تھیم",
|
||||
"Thinking...": "سوچ رہا ہے...",
|
||||
"This action cannot be undone. Do you wish to continue?": "یہ عمل واپس نہیں کیا جا سکتا کیا آپ جاری رکھنا چاہتے ہیں؟",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "والو کامیابی کے ساتھ اپ ڈیٹ ہو گئے",
|
||||
"variable": "متغیر",
|
||||
"variable to have them replaced with clipboard content.": "انہیں کلپ بورڈ کے مواد سے تبدیل کرنے کے لیے متغیر",
|
||||
"Verify Connection": "",
|
||||
"Version": "ورژن",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "ورژن {{selectedVersion}} کا {{totalVersions}} میں سے",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "Đề xuất prompt mặc định",
|
||||
"Default to 389 or 636 if TLS is enabled": "",
|
||||
"Default to ALL": "",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
|
||||
"Default User Role": "Vai trò mặc định",
|
||||
"Delete": "Xóa",
|
||||
"Delete a model": "Xóa mô hình",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
|
||||
"Info": "Thông tin",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
|
||||
"Input commands": "Nhập các câu lệnh",
|
||||
"Install from Github URL": "Cài đặt từ Github URL",
|
||||
"Instant Auto-Send After Voice Transcription": "Tự động gửi ngay lập tức sau khi phiên dịch giọng nói",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "",
|
||||
"Previous 30 days": "30 ngày trước",
|
||||
"Previous 7 days": "7 ngày trước",
|
||||
"Private": "",
|
||||
"Profile Image": "Ảnh đại diện",
|
||||
"Prompt": "",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ví dụ: Hãy kể cho tôi một sự thật thú vị về Đế chế La Mã)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "",
|
||||
"Prompts": "Prompt",
|
||||
"Prompts Access": "",
|
||||
"Public": "",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "Tải \"{{searchValue}}\" từ Ollama.com",
|
||||
"Pull a model from Ollama.com": "Tải mô hình từ Ollama.com",
|
||||
"Query Generation Prompt": "",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "Chủ đề",
|
||||
"Thinking...": "Đang suy luận...",
|
||||
"This action cannot be undone. Do you wish to continue?": "Hành động này không thể được hoàn tác. Bạn có muốn tiếp tục không?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "Đã cập nhật Valves thành công",
|
||||
"variable": "biến",
|
||||
"variable to have them replaced with clipboard content.": "biến để có chúng được thay thế bằng nội dung clipboard.",
|
||||
"Verify Connection": "",
|
||||
"Version": "Version",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "",
|
||||
"View Replies": "",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "默认提示词建议",
|
||||
"Default to 389 or 636 if TLS is enabled": "如果启用 TLS,则默认为 389 或 636",
|
||||
"Default to ALL": "默认为 ALL",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "默认进行分段检索以提取重点和相关内容 (推荐)",
|
||||
"Default User Role": "默认用户角色",
|
||||
"Delete": "删除",
|
||||
"Delete a model": "删除一个模型",
|
||||
|
@ -359,7 +360,7 @@
|
|||
"Embedding model set to \"{{embedding_model}}\"": "语义向量模型设置为 \"{{embedding_model}}\"",
|
||||
"Enable API Key": "启用 API 密钥",
|
||||
"Enable autocomplete generation for chat messages": "启用聊天消息的输入框内容猜测补全",
|
||||
"Enable Code Execution": "",
|
||||
"Enable Code Execution": "启用代码执行",
|
||||
"Enable Code Interpreter": "启用代码解释器",
|
||||
"Enable Community Sharing": "启用分享至社区",
|
||||
"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。",
|
||||
|
@ -451,7 +452,7 @@
|
|||
"Example: mail": "例如:mail",
|
||||
"Example: ou=users,dc=foo,dc=example": "例如:ou=users,dc=foo,dc=example",
|
||||
"Example: sAMAccountName or uid or userPrincipalName": "例如:sAMAccountName 或 uid 或 userPrincipalName",
|
||||
"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "",
|
||||
"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "已达到最大授权人数,请联系支持人员提升授权人数。",
|
||||
"Exclude": "排除",
|
||||
"Execute code for analysis": "执行代码进行分析",
|
||||
"Expand": "展开",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影响算法对生成文本反馈的响应速度。较低的学习率将导致调整更慢,而较高的学习率将使算法反应更灵敏。",
|
||||
"Info": "信息",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "注入整个内容作为上下文进行综合处理,适用于复杂查询",
|
||||
"Input commands": "输入命令",
|
||||
"Install from Github URL": "从 Github URL 安装",
|
||||
"Instant Auto-Send After Voice Transcription": "语音转录文字后即时自动发送",
|
||||
|
@ -640,7 +642,7 @@
|
|||
"Local Models": "本地模型",
|
||||
"Location access not allowed": "不允许访问位置信息",
|
||||
"Logit Bias": "Logit 偏置",
|
||||
"Lost": "丢失",
|
||||
"Lost": "落败",
|
||||
"LTR": "从左至右",
|
||||
"Made by Open WebUI Community": "由 OpenWebUI 社区制作",
|
||||
"Make sure to enclose them with": "确保将它们包含在内",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "重复惩罚(Presence Penalty)",
|
||||
"Previous 30 days": "过去 30 天",
|
||||
"Previous 7 days": "过去 7 天",
|
||||
"Private": "私有",
|
||||
"Profile Image": "用户头像",
|
||||
"Prompt": "提示词 (Prompt)",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示(例如:给我讲一个关于罗马帝国的趣事。)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "提示词更新成功",
|
||||
"Prompts": "提示词",
|
||||
"Prompts Access": "访问提示词",
|
||||
"Public": "公共",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "从 Ollama.com 拉取 \"{{searchValue}}\"",
|
||||
"Pull a model from Ollama.com": "从 Ollama.com 拉取一个模型",
|
||||
"Query Generation Prompt": "查询生成提示词",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "主题",
|
||||
"Thinking...": "正在思考...",
|
||||
"This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "此频道创建于{{createdAt}},这里是{{channelName}}频道的开始",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "值更新成功",
|
||||
"variable": "变量",
|
||||
"variable to have them replaced with clipboard content.": "变量将被剪贴板内容替换。",
|
||||
"Verify Connection": "验证连接",
|
||||
"Version": "版本",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "版本 {{selectedVersion}}/{{totalVersions}}",
|
||||
"View Replies": "查看回复",
|
||||
|
@ -1159,7 +1165,7 @@
|
|||
"Write your model template content here": "在此写入模型模板内容",
|
||||
"Yesterday": "昨天",
|
||||
"You": "你",
|
||||
"You are currently using a trial license. Please contact support to upgrade your license.": "",
|
||||
"You are currently using a trial license. Please contact support to upgrade your license.": "当前为试用许可证,请联系支持人员升级许可证。",
|
||||
"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "每次对话最多仅能附上 {{maxCount}} 个文件。",
|
||||
"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。",
|
||||
"You cannot upload an empty file.": "请勿上传空文件。",
|
||||
|
|
|
@ -270,6 +270,7 @@
|
|||
"Default Prompt Suggestions": "預設提示詞建議",
|
||||
"Default to 389 or 636 if TLS is enabled": "如果啓用了 TLS 則預設為 389 或 636",
|
||||
"Default to ALL": "預設到所有",
|
||||
"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "預設使用分段檢索以提取聚焦且相關的內容,建議用於大多數情況。",
|
||||
"Default User Role": "預設使用者角色",
|
||||
"Delete": "刪除",
|
||||
"Delete a model": "刪除模型",
|
||||
|
@ -359,7 +360,7 @@
|
|||
"Embedding model set to \"{{embedding_model}}\"": "嵌入模型已設定為 \"{{embedding_model}}\"",
|
||||
"Enable API Key": "啟用 API 金鑰",
|
||||
"Enable autocomplete generation for chat messages": "啟用聊天訊息的自動完成生成",
|
||||
"Enable Code Execution": "",
|
||||
"Enable Code Execution": "啟用程式碼執行",
|
||||
"Enable Code Interpreter": "啟用程式碼解釋器",
|
||||
"Enable Community Sharing": "啟用社群分享",
|
||||
"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "啟用記憶體鎖定(mlock)以防止模型資料被換出 RAM。此選項會將模型的工作頁面集鎖定在 RAM 中,確保它們不會被換出到磁碟。這可以透過避免頁面錯誤和確保快速資料存取來維持效能。",
|
||||
|
@ -583,6 +584,7 @@
|
|||
"Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數",
|
||||
"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影響算法對生成文本回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使算法反應更靈敏。",
|
||||
"Info": "資訊",
|
||||
"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "將完整內容注入為上下文以進行全面處理,建議用於複雜查詢。",
|
||||
"Input commands": "輸入命令",
|
||||
"Install from Github URL": "從 GitHub URL 安裝",
|
||||
"Instant Auto-Send After Voice Transcription": "語音轉錄後立即自動傳送",
|
||||
|
@ -806,6 +808,7 @@
|
|||
"Presence Penalty": "在場懲罰",
|
||||
"Previous 30 days": "過去 30 天",
|
||||
"Previous 7 days": "過去 7 天",
|
||||
"Private": "私有",
|
||||
"Profile Image": "個人檔案圖片",
|
||||
"Prompt": "提示詞",
|
||||
"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示詞(例如:告訴我關於羅馬帝國的一些趣事)",
|
||||
|
@ -815,6 +818,7 @@
|
|||
"Prompt updated successfully": "提示詞更新成功",
|
||||
"Prompts": "提示詞",
|
||||
"Prompts Access": "提示詞存取",
|
||||
"Public": "公開",
|
||||
"Pull \"{{searchValue}}\" from Ollama.com": "從 Ollama.com 下載「{{searchValue}}」",
|
||||
"Pull a model from Ollama.com": "從 Ollama.com 下載模型",
|
||||
"Query Generation Prompt": "查詢生成提示詞",
|
||||
|
@ -1009,6 +1013,7 @@
|
|||
"Theme": "主題",
|
||||
"Thinking...": "正在思考...",
|
||||
"This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?",
|
||||
"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "此頻道創建於 {{createdAt}}。這是 {{channelName}} 頻道的起點。",
|
||||
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!",
|
||||
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。",
|
||||
"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此選項控制在刷新上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文有助於保持對話的連貫性,但也可能降低對新主題的回應能力。",
|
||||
|
@ -1118,6 +1123,7 @@
|
|||
"Valves updated successfully": "閥門更新成功",
|
||||
"variable": "變數",
|
||||
"variable to have them replaced with clipboard content.": "變數,以便將其替換為剪貼簿內容。",
|
||||
"Verify Connection": "驗證連線",
|
||||
"Version": "版本",
|
||||
"Version {{selectedVersion}} of {{totalVersions}}": "第 {{selectedVersion}} 版,共 {{totalVersions}} 版",
|
||||
"View Replies": "檢視回覆",
|
||||
|
|
|
@ -752,7 +752,7 @@ export const extractSentencesForAudio = (text: string) => {
|
|||
};
|
||||
|
||||
export const getMessageContentParts = (content: string, split_on: string = 'punctuation') => {
|
||||
content = removeDetails(content, ['reasoning', 'code_interpreter']);
|
||||
content = removeDetails(content, ['reasoning', 'code_interpreter', 'tool_calls']);
|
||||
const messageContentParts: string[] = [];
|
||||
|
||||
switch (split_on) {
|
||||
|
|
|
@ -171,7 +171,11 @@
|
|||
}
|
||||
|
||||
// Check if Ctrl + Shift + ' (or ") is pressed
|
||||
if (isCtrlPressed && isShiftPressed && event.key.toLowerCase() === `'`) {
|
||||
if (
|
||||
isCtrlPressed &&
|
||||
isShiftPressed &&
|
||||
(event.key.toLowerCase() === `'` || event.key.toLowerCase() === `"`)
|
||||
) {
|
||||
event.preventDefault();
|
||||
console.log('temporaryChat');
|
||||
temporaryChatEnabled.set(!$temporaryChatEnabled);
|
||||
|
|
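For context on the widened shortcut check above: with Shift held, the quote key reports " rather than ' as event.key on common keyboard layouts, so accepting both characters keeps Ctrl+Shift+' working. A minimal sketch of the same condition, where the ctrlKey and shiftKey flags are assumptions standing in for the component's isCtrlPressed and isShiftPressed:

// Hypothetical helper, not part of the diff: mirrors the widened key check.
const isTemporaryChatShortcut = (event: KeyboardEvent): boolean => {
	const key = event.key.toLowerCase();
	// Shift + ' produces '"' on many layouts, so both quote characters are accepted.
	return event.ctrlKey && event.shiftKey && (key === `'` || key === `"`);
};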