mirror of https://github.com/microsoft/autogen.git
PGVector Support for Custom Connection Object (#2566)
* Added fixes and tests for basic auth format
* User can provide their own connection object. Added test for it.
* Updated instructions on how to use. Fully tested all 3 authentication methods successfully.
* Get password from gitlab secrets.
* Hide passwords.
* Update notebook/agentchat_pgvector_RetrieveChat.ipynb (Co-authored-by: Li Jiang <bnujli@gmail.com>)
* Hide passwords.
* Added connection_string test. 3 tests total for auth.
* Fixed quotes on db config params. No other changes found.
* Ran notebook
* Ran pre-commits and updated setup to include psycopg[binary] for Windows and Mac.
* Corrected list extension.
* Separate connection establishment function. Testing pending.
* Fixed pgvectordb auth
* Update agentchat_pgvector_RetrieveChat.ipynb: added autocommit=True in example
* Rerun notebook

---------

Co-authored-by: Li Jiang <bnujli@gmail.com>
Co-authored-by: Li Jiang <lijiang1@microsoft.com>
This commit is contained in:
parent 129887519d
commit 6604ca511b
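For orientation before the file-by-file diff, here is a minimal sketch of the three authentication paths the commit message refers to. The localhost credentials are placeholders, `autocommit=True` mirrors the example added to the notebook below, and none of this code appears verbatim in the diff:

```python
import psycopg

from autogen.agentchat.contrib.vectordb.pgvectordb import PGVectorDB

# 1. Connection-string authentication (placeholder credentials)
db = PGVectorDB(connection_string="postgresql://postgres:postgres@localhost:5432/postgres")

# 2. Basic authentication from individual parameters
db = PGVectorDB(host="localhost", port=5432, dbname="postgres", username="postgres", password="postgres")

# 3. Custom connection object - the new path this PR adds
conn = psycopg.connect(
    conninfo="postgresql://postgres:postgres@localhost:5432/postgres",
    autocommit=True,  # PGVectorDB issues DDL (CREATE EXTENSION / CREATE TABLE), so autocommit matters
)
db = PGVectorDB(conn=conn)
```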
@@ -1,3 +1,91 @@
# Source code
*.bash text eol=lf
*.bat text eol=crlf
*.cmd text eol=crlf
*.coffee text
*.css text diff=css eol=lf
*.htm text diff=html eol=lf
*.html text diff=html eol=lf
*.inc text
*.ini text
*.js text
*.json text eol=lf
*.jsx text
*.less text
*.ls text
*.map text -diff
*.od text
*.onlydata text
*.php text diff=php
*.pl text
*.ps1 text eol=crlf
*.py text diff=python eol=lf
*.rb text diff=ruby eol=lf
*.sass text
*.scm text
*.scss text diff=css
*.sh text eol=lf
.husky/* text eol=lf
*.sql text
*.styl text
*.tag text
*.ts text
*.tsx text
*.xml text
*.xhtml text diff=html

# Docker
Dockerfile text eol=lf

# Documentation
*.ipynb text
*.markdown text diff=markdown eol=lf
*.md text diff=markdown eol=lf
*.mdwn text diff=markdown eol=lf
*.mdown text diff=markdown eol=lf
*.mkd text diff=markdown eol=lf
*.mkdn text diff=markdown eol=lf
*.mdtxt text eol=lf
*.mdtext text eol=lf
*.txt text eol=lf
AUTHORS text eol=lf
CHANGELOG text eol=lf
CHANGES text eol=lf
CONTRIBUTING text eol=lf
COPYING text eol=lf
copyright text eol=lf
*COPYRIGHT* text eol=lf
INSTALL text eol=lf
license text eol=lf
LICENSE text eol=lf
NEWS text eol=lf
readme text eol=lf
*README* text eol=lf
TODO text

# Configs
*.cnf text eol=lf
*.conf text eol=lf
*.config text eol=lf
.editorconfig text
.env text eol=lf
.gitattributes text eol=lf
.gitconfig text eol=lf
.htaccess text
*.lock text -diff
package.json text eol=lf
package-lock.json text eol=lf -diff
pnpm-lock.yaml text eol=lf -diff
.prettierrc text
yarn.lock text -diff
*.toml text eol=lf
*.yaml text eol=lf
*.yml text eol=lf
browserslist text
Makefile text eol=lf
makefile text eol=lf

# Images
*.png filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
@@ -1,7 +1,7 @@
 import os
 import re
 import urllib.parse
-from typing import Callable, List
+from typing import Callable, List, Optional, Union

 import numpy as np
 from sentence_transformers import SentenceTransformer
@@ -231,7 +231,14 @@ class Collection:
             exists = cursor.fetchone()[0]
         return exists

-    def get(self, ids=None, include=None, where=None, limit=None, offset=None) -> List[Document]:
+    def get(
+        self,
+        ids: Optional[str] = None,
+        include: Optional[str] = None,
+        where: Optional[str] = None,
+        limit: Optional[Union[int, str]] = None,
+        offset: Optional[Union[int, str]] = None,
+    ) -> List[Document]:
         """
         Retrieve documents from the collection.
@@ -272,7 +279,6 @@ class Collection:

        # Construct the full query
        query = f"{select_clause} {from_clause} {where_clause} {limit_clause} {offset_clause}"

        retrieved_documents = []
        try:
            # Execute the query with the appropriate values
@@ -380,11 +386,11 @@ class Collection:
     def query(
         self,
         query_texts: List[str],
-        collection_name: str = None,
-        n_results: int = 10,
-        distance_type: str = "euclidean",
-        distance_threshold: float = -1,
-        include_embedding: bool = False,
+        collection_name: Optional[str] = None,
+        n_results: Optional[int] = 10,
+        distance_type: Optional[str] = "euclidean",
+        distance_threshold: Optional[float] = -1,
+        include_embedding: Optional[bool] = False,
     ) -> QueryResults:
         """
         Query documents in the collection.
@@ -450,7 +456,7 @@ class Collection:
         return results

     @staticmethod
-    def convert_string_to_array(array_string) -> List[float]:
+    def convert_string_to_array(array_string: str) -> List[float]:
         """
         Convert a string representation of an array to a list of floats.
@@ -467,7 +473,7 @@ class Collection:
         array = [float(num) for num in array_string.split()]
         return array

-    def modify(self, metadata, collection_name: str = None) -> None:
+    def modify(self, metadata, collection_name: Optional[str] = None) -> None:
         """
         Modify metadata for the collection.
@@ -486,7 +492,7 @@ class Collection:
         )
         cursor.close()

-    def delete(self, ids: List[ItemID], collection_name: str = None) -> None:
+    def delete(self, ids: List[ItemID], collection_name: Optional[str] = None) -> None:
         """
         Delete documents from the collection.
@@ -504,7 +510,7 @@ class Collection:
         cursor.execute(f"DELETE FROM {self.name} WHERE id IN ({id_placeholders});", ids)
         cursor.close()

-    def delete_collection(self, collection_name: str = None) -> None:
+    def delete_collection(self, collection_name: Optional[str] = None) -> None:
         """
         Delete the entire collection.
@@ -520,7 +526,7 @@ class Collection:
         cursor.execute(f"DROP TABLE IF EXISTS {self.name}")
         cursor.close()

-    def create_collection(self, collection_name: str = None) -> None:
+    def create_collection(self, collection_name: Optional[str] = None) -> None:
         """
         Create a new collection.
@@ -557,16 +563,17 @@ class PGVectorDB(VectorDB):
     def __init__(
         self,
         *,
-        connection_string: str = None,
-        host: str = None,
-        port: int = None,
-        dbname: str = None,
-        username: str = None,
-        password: str = None,
-        connect_timeout: int = 10,
+        conn: Optional[psycopg.Connection] = None,
+        connection_string: Optional[str] = None,
+        host: Optional[str] = None,
+        port: Optional[Union[int, str]] = None,
+        dbname: Optional[str] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        connect_timeout: Optional[int] = 10,
         embedding_function: Callable = None,
-        metadata: dict = None,
-        model_name: str = "all-MiniLM-L6-v2",
+        metadata: Optional[dict] = None,
+        model_name: Optional[str] = "all-MiniLM-L6-v2",
     ) -> None:
         """
         Initialize the vector database.
@@ -574,6 +581,9 @@ class PGVectorDB(VectorDB):
         Note: connection_string or host + port + dbname must be specified

         Args:
+            conn: psycopg.Connection | A custom connection object to connect to the database.
+                A connection object may include additional key/values:
+                https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
             connection_string: "postgresql://username:password@hostname:port/database" | The PGVector connection string. Default is None.
             host: str | The host to connect to. Default is None.
             port: int | The port to connect to. Default is None.
@@ -593,31 +603,16 @@ class PGVectorDB(VectorDB):
         Returns:
             None
         """
-        try:
-            if connection_string:
-                parsed_connection = urllib.parse.urlparse(connection_string)
-                encoded_username = urllib.parse.quote(parsed_connection.username, safe="")
-                encoded_password = urllib.parse.quote(parsed_connection.password, safe="")
-                encoded_host = urllib.parse.quote(parsed_connection.hostname, safe="")
-                encoded_database = urllib.parse.quote(parsed_connection.path[1:], safe="")
-                connection_string_encoded = (
-                    f"{parsed_connection.scheme}://{encoded_username}:{encoded_password}"
-                    f"@{encoded_host}:{parsed_connection.port}/{encoded_database}"
-                )
-                self.client = psycopg.connect(conninfo=connection_string_encoded, autocommit=True)
-            elif host and port and dbname:
-                self.client = psycopg.connect(
+        self.client = self.establish_connection(
+            conn=conn,
+            connection_string=connection_string,
             host=host,
             port=port,
             dbname=dbname,
             username=username,
             password=password,
             connect_timeout=connect_timeout,
-            autocommit=True,
         )
-        except psycopg.Error as e:
-            logger.error("Error connecting to the database: ", e)
-            raise e
         self.model_name = model_name
         try:
             self.embedding_function = (
@@ -630,10 +625,87 @@ class PGVectorDB(VectorDB):
             )
             raise e
         self.metadata = metadata
-        self.client.execute("CREATE EXTENSION IF NOT EXISTS vector")
         register_vector(self.client)
         self.active_collection = None

+    def establish_connection(
+        self,
+        conn: Optional[psycopg.Connection] = None,
+        connection_string: Optional[str] = None,
+        host: Optional[str] = None,
+        port: Optional[Union[int, str]] = None,
+        dbname: Optional[str] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        connect_timeout: Optional[int] = 10,
+    ) -> psycopg.Connection:
+        """
+        Establishes a connection to a PostgreSQL database using psycopg.
+
+        Args:
+            conn: An existing psycopg connection object. If provided, this connection will be used.
+            connection_string: A string containing the connection information. If provided, a new connection will be established using this string.
+            host: The hostname of the PostgreSQL server. Used if connection_string is not provided.
+            port: The port number to connect to at the server host. Used if connection_string is not provided.
+            dbname: The database name. Used if connection_string is not provided.
+            username: The username to connect as. Used if connection_string is not provided.
+            password: The user's password. Used if connection_string is not provided.
+            connect_timeout: Maximum wait for connection, in seconds. The default is 10 seconds.
+
+        Returns:
+            A psycopg.Connection object representing the established connection.
+
+        Raises:
+            PermissionError if no credentials are supplied
+            psycopg.Error: If an error occurs while trying to connect to the database.
+        """
+        try:
+            if conn:
+                self.client = conn
+            elif connection_string:
+                parsed_connection = urllib.parse.urlparse(connection_string)
+                encoded_username = urllib.parse.quote(parsed_connection.username, safe="")
+                encoded_password = urllib.parse.quote(parsed_connection.password, safe="")
+                encoded_password = f":{encoded_password}@"
+                encoded_host = urllib.parse.quote(parsed_connection.hostname, safe="")
+                encoded_port = f":{parsed_connection.port}"
+                encoded_database = urllib.parse.quote(parsed_connection.path[1:], safe="")
+                connection_string_encoded = (
+                    f"{parsed_connection.scheme}://{encoded_username}{encoded_password}"
+                    f"{encoded_host}{encoded_port}/{encoded_database}"
+                )
+                self.client = psycopg.connect(conninfo=connection_string_encoded, autocommit=True)
+            elif host:
+                connection_string = ""
+                if host:
+                    encoded_host = urllib.parse.quote(host, safe="")
+                    connection_string += f"host={encoded_host} "
+                if port:
+                    connection_string += f"port={port} "
+                if dbname:
+                    encoded_database = urllib.parse.quote(dbname, safe="")
+                    connection_string += f"dbname={encoded_database} "
+                if username:
+                    encoded_username = urllib.parse.quote(username, safe="")
+                    connection_string += f"user={encoded_username} "
+                if password:
+                    encoded_password = urllib.parse.quote(password, safe="")
+                    connection_string += f"password={encoded_password} "
+
+                self.client = psycopg.connect(
+                    conninfo=connection_string,
+                    connect_timeout=connect_timeout,
+                    autocommit=True,
+                )
+            else:
+                logger.error("Credentials were not supplied...")
+                raise PermissionError
+            self.client.execute("CREATE EXTENSION IF NOT EXISTS vector")
+        except psycopg.Error as e:
+            logger.error("Error connecting to the database: ", e)
+            raise e
+        return self.client
+
     def create_collection(
         self, collection_name: str, overwrite: bool = False, get_or_create: bool = True
     ) -> Collection:
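The precedence in `establish_connection` is `conn` first, then `connection_string`, then the individual host parameters. A brief sketch of the connection-object path this enables (placeholder localhost credentials again; not code from the diff):

```python
import psycopg

from autogen.agentchat.contrib.vectordb.pgvectordb import PGVectorDB

# One user-managed connection can be handed to PGVectorDB; when conn is given,
# it takes precedence over any connection_string/host arguments passed alongside it.
conn = psycopg.connect(
    conninfo="postgresql://postgres:postgres@localhost:5432/postgres",  # placeholder
    autocommit=True,
)
db_a = PGVectorDB(conn=conn)
db_b = PGVectorDB(conn=conn)  # reuses the same connection rather than opening a new one
```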
@@ -40,17 +40,16 @@
     "version: '3.9'\n",
     "\n",
     "services:\n",
-    "  db:\n",
-    "    hostname: db\n",
-    "    image: ankane/pgvector\n",
+    "  pgvector:\n",
+    "    image: pgvector/pgvector:pg16\n",
+    "    shm_size: 128mb\n",
+    "    restart: unless-stopped\n",
     "    ports:\n",
-    "      - 5432:5432\n",
-    "    restart: always\n",
+    "      - \"5432:5432\"\n",
     "    environment:\n",
-    "      - POSTGRES_DB=postgres\n",
-    "      - POSTGRES_USER=postgres\n",
-    "      - POSTGRES_PASSWORD=postgres\n",
-    "      - POSTGRES_HOST_AUTH_METHOD=trust\n",
+    "      POSTGRES_USER: <postgres-user>\n",
+    "      POSTGRES_PASSWORD: <postgres-password>\n",
+    "      POSTGRES_DB: <postgres-database>\n",
     "    volumes:\n",
     "      - ./init.sql:/docker-entrypoint-initdb.d/init.sql\n",
     "```\n",
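Once the container above is started (e.g. with `docker compose up -d`), PostgreSQL may need a few seconds before it accepts connections. A small readiness-check sketch, assuming the placeholder credentials are replaced with the `postgres`/`postgres`/`postgres` values used elsewhere in this notebook:

```python
import time

import psycopg

# Poll until the pgvector container accepts connections (placeholder credentials).
for attempt in range(10):
    try:
        with psycopg.connect(
            conninfo="postgresql://postgres:postgres@localhost:5432/postgres",
            connect_timeout=3,
        ) as check:
            check.execute("SELECT 1")  # psycopg 3 allows execute() directly on the connection
        break
    except psycopg.OperationalError:
        time.sleep(2)  # not ready yet; retry
else:
    raise RuntimeError("PostgreSQL did not become ready in time")
```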
@@ -73,14 +72,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "models to use: ['Meta-Llama-3-8B-Instruct-imatrix', 'gpt-3.5-turbo-0125', 'gpt-35-turbo']\n"
+      "models to use: ['gpt-35-turbo', 'gpt4-1106-preview', 'gpt-35-turbo-0613']\n"
      ]
     }
    ],
@@ -89,6 +88,7 @@
    "import os\n",
    "\n",
    "import chromadb\n",
+   "import psycopg\n",
    "\n",
    "import autogen\n",
    "from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent\n",
@@ -137,7 +137,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
@@ -145,7 +145,7 @@
      "output_type": "stream",
      "text": [
       "Accepted file formats for `docs_path`:\n",
-      "['org', 'pdf', 'md', 'docx', 'epub', 'rst', 'rtf', 'xml', 'ppt', 'txt', 'jsonl', 'msg', 'htm', 'yaml', 'html', 'xlsx', 'log', 'yml', 'odt', 'tsv', 'doc', 'pptx', 'csv', 'json']\n"
+      "['txt', 'json', 'csv', 'tsv', 'md', 'html', 'htm', 'rtf', 'rst', 'jsonl', 'log', 'xml', 'yaml', 'yml', 'pdf']\n"
      ]
     }
    ],
@@ -156,15 +156,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "/home/lijiang1/anaconda3/envs/autogen/lib/python3.10/site-packages/torch/cuda/__init__.py:141: UserWarning: CUDA initialization: The NVIDIA driver on your system is too old (found version 11060). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver. (Triggered internally at ../c10/cuda/CUDAFunctions.cpp:108.)\n",
-      "  return torch._C._cuda_getDeviceCount() > 0\n"
+      "/workspace/anaconda3/envs/autogen/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n",
+      "/workspace/anaconda3/envs/autogen/lib/python3.11/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
+      "  warnings.warn(\n"
      ]
     }
    ],
@@ -172,7 +174,7 @@
    "# 1. create an RetrieveAssistantAgent instance named \"assistant\"\n",
    "assistant = RetrieveAssistantAgent(\n",
    "    name=\"assistant\",\n",
-   "    system_message=\"You are a helpful assistant.\",\n",
+   "    system_message=\"You are a helpful assistant. You must always reply with some form of text.\",\n",
    "    llm_config={\n",
    "        \"timeout\": 600,\n",
    "        \"cache_seed\": 42,\n",
@@ -180,6 +182,9 @@
    "    },\n",
    ")\n",
    "\n",
+   "# Optionally create psycopg conn object\n",
+   "# conn = psycopg.connect(conninfo=\"postgresql://postgres:postgres@localhost:5432/postgres\", autocommit=True)\n",
+   "\n",
    "# 2. create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n",
    "# By default, the human_input_mode is \"ALWAYS\", which means the agent will ask for human input at every step. We set it to \"NEVER\" here.\n",
    "# `docs_path` is the path to the docs directory. It can also be the path to a single file, or the url to a single file. By default,\n",
@@ -208,12 +213,13 @@
    "        \"collection_name\": \"flaml_collection\",\n",
    "        \"db_config\": {\n",
    "            \"connection_string\": \"postgresql://postgres:postgres@localhost:5432/postgres\",  # Optional - connect to an external vector database\n",
-   "            # \"host\": postgres,  # Optional vector database host\n",
+   "            # \"host\": \"postgres\",  # Optional vector database host\n",
    "            # \"port\": 5432,  # Optional vector database port\n",
-   "            # \"database\": postgres,  # Optional vector database name\n",
-   "            # \"username\": postgres,  # Optional vector database username\n",
-   "            # \"password\": postgres,  # Optional vector database password\n",
+   "            # \"dbname\": \"postgres\",  # Optional vector database name\n",
+   "            # \"username\": \"postgres\",  # Optional vector database username\n",
+   "            # \"password\": \"postgres\",  # Optional vector database password\n",
    "            \"model_name\": \"all-MiniLM-L6-v2\",  # Sentence embedding model from https://huggingface.co/models?library=sentence-transformers or https://www.sbert.net/docs/pretrained_models.html\n",
+   "            # \"conn\": conn,  # Optional - conn object to connect to database\n",
    "        },\n",
    "        \"get_or_create\": True,  # set to False if you don't want to reuse an existing collection\n",
    "        \"overwrite\": False,  # set to True if you want to overwrite an existing collection\n",
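For the connection-object path, the commented `"conn"` entry above replaces the string-based keys entirely. A minimal sketch of that variant; the `task` value is a stand-in and the config is abbreviated, not the notebook's full cell:

```python
import psycopg

from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

# Placeholder localhost credentials, matching the compose file above
conn = psycopg.connect(
    conninfo="postgresql://postgres:postgres@localhost:5432/postgres",
    autocommit=True,  # required: PGVectorDB issues DDL such as CREATE EXTENSION
)

ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    retrieve_config={
        "task": "code",  # stand-in value
        "vector_db": "pgvector",
        "collection_name": "flaml_collection",
        "db_config": {"conn": conn},  # the new connection-object authentication path
        "get_or_create": True,
    },
)
```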
@@ -238,14 +244,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "2024-04-25 11:23:53,000 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `flaml_collection`.\u001b[0m\n"
+      "2024-05-23 08:48:18,875 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `flaml_collection`.\u001b[0m\n"
      ]
     },
     {
@@ -259,7 +265,11 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "2024-04-25 11:23:54,745 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n"
+      "2024-05-23 08:48:19,975 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n",
+      "2024-05-23 08:48:19,977 - autogen.agentchat.contrib.vectordb.pgvectordb - INFO - Error executing select on non-existent table: flaml_collection. Creating it instead. Error: relation \"flaml_collection\" does not exist\n",
+      "LINE 1: SELECT id, metadatas, documents, embedding FROM flaml_collec...\n",
+      "        ^\u001b[0m\n",
+      "2024-05-23 08:48:19,996 - autogen.agentchat.contrib.vectordb.pgvectordb - INFO - Created table flaml_collection\u001b[0m\n"
      ]
     },
     {
@@ -794,60 +804,7 @@
       "--------------------------------------------------------------------------------\n",
       "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
       "\n",
-      "To use FLAML for a classification task and perform parallel training using Spark and train for 30 seconds while forcing cancel jobs if the time limit is reached, you can use the following code:\n",
-      "\n",
-      "```python\n",
-      "import flaml\n",
-      "from flaml.automl.spark.utils import to_pandas_on_spark\n",
-      "from pyspark.ml.feature import VectorAssembler\n",
-      "\n",
-      "# load your classification dataset as a pandas DataFrame\n",
-      "dataframe = ...\n",
-      "\n",
-      "# convert the pandas DataFrame to a pandas-on-spark DataFrame\n",
-      "psdf = to_pandas_on_spark(dataframe)\n",
-      "\n",
-      "# define the label column\n",
-      "label = ...\n",
-      "\n",
-      "# use VectorAssembler to merge all feature columns into a single vector column\n",
-      "columns = psdf.columns\n",
-      "feature_cols = [col for col in columns if col != label]\n",
-      "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n",
-      "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n",
-      "\n",
-      "# configure the AutoML settings\n",
-      "settings = {\n",
-      "    \"time_budget\": 30,\n",
-      "    \"metric\": 'accuracy',\n",
-      "    \"task\": 'classification',\n",
-      "    \"log_file_name\": 'classification.log',\n",
-      "    \"estimator_list\": ['lgbm_spark'],\n",
-      "    \"n_concurrent_trials\": 2,\n",
-      "    \"use_spark\": True,\n",
-      "    \"force_cancel\": True\n",
-      "}\n",
-      "\n",
-      "# create and run the AutoML experiment\n",
-      "automl = flaml.AutoML()\n",
-      "automl.fit(\n",
-      "    dataframe=psdf,\n",
-      "    label=label,\n",
-      "    **settings\n",
-      ")\n",
-      "```\n",
-      "\n",
-      "Note that you will need to replace the placeholders with your own dataset and label column names. This code will use FLAML's `lgbm_spark` estimator for training the classification model in parallel using Spark. The training will be restricted to 30 seconds, and if the time limit is reached, FLAML will force cancel the Spark jobs.\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
-      "\n",
-      "\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
-      "\n",
-      "UPDATE CONTEXT\n",
+      "To use FLAML to perform a classification task and use Spark to do parallel training, you need to use the Spark ML estimators for AutoML. First, you need to prepare your data in the required format as described in the previous section. FLAML provides a convenient function \"to_pandas_on_spark\" to convert your data into a pandas-on-spark dataframe/series, which Spark estimators require. After that, use the pandas-on-spark data like non-spark data and pass them using X_train, y_train or dataframe, label. Finally, configure FLAML to use Spark as the parallel backend during parallel tuning by setting the use_spark to true. An example code snippet is provided in the context above.\n",
       "\n",
       "--------------------------------------------------------------------------------\n"
      ]
@@ -883,7 +840,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
@@ -1153,276 +1110,18 @@
      "\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "--------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
      "\n",
      "The authors of FLAML are Chi Wang, Qingyun Wu, Markus Weimer, and Erkang Zhu.\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[32mAdding content of doc bdfbc921 to context.\u001b[0m\n",
      "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
      "\n",
      "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n",
      "context provided by the user.\n",
      "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n",
      "For code generation, you must obey the following rules:\n",
      "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n",
      "Rule 2. You must follow the formats below to write your code:\n",
      "```language\n",
      "# your code\n",
      "```\n",
      "\n",
      "User's question is: Who is the author of FLAML?\n",
      "\n",
      "Context is: # Research\n",
      "\n",
      "For technical details, please check our research publications.\n",
      "\n",
      "- [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wang2021flaml,\n",
      "    title={FLAML: A Fast and Lightweight AutoML Library},\n",
      "    author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n",
      "    year={2021},\n",
      "    booktitle={MLSys},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wu2021cfo,\n",
      "    title={Frugal Optimization for Cost-related Hyperparameters},\n",
      "    author={Qingyun Wu and Chi Wang and Silu Huang},\n",
      "    year={2021},\n",
      "    booktitle={AAAI},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wang2021blendsearch,\n",
      "    title={Economical Hyperparameter Optimization With Blended Search Strategy},\n",
      "    author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n",
      "    year={2021},\n",
      "    booktitle={ICLR},\n",
      "}\n",
      "```\n",
      "\n",
      "- [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. ACL 2021.\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{liuwang2021hpolm,\n",
      "    title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n",
      "    author={Susan Xueqing Liu and Chi Wang},\n",
      "    year={2021},\n",
      "    booktitle={ACL},\n",
      "}\n",
      "```\n",
      "\n",
      "- [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wu2021chacha,\n",
      "    title={ChaCha for Online AutoML},\n",
      "    author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n",
      "    year={2021},\n",
      "    booktitle={ICML},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wuwang2021fairautoml,\n",
      "    title={Fair AutoML},\n",
      "    author={Qingyun Wu and Chi Wang},\n",
      "    year={2021},\n",
      "    booktitle={ArXiv preprint arXiv:2111.06495},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{kayaliwang2022default,\n",
      "    title={Mining Robust Default Configurations for Resource-constrained AutoML},\n",
      "    author={Moe Kayali and Chi Wang},\n",
      "    year={2022},\n",
      "    booktitle={ArXiv preprint arXiv:2202.09927},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{zhang2023targeted,\n",
      "    title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n",
      "    author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n",
      "    booktitle={International Conference on Learning Representations},\n",
      "    year={2023},\n",
      "    url={https://openreview.net/forum?id=0Ij9_q567Ma},\n",
      "}\n",
      "```\n",
      "\n",
      "- [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wang2023EcoOptiGen,\n",
      "    title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n",
      "    author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n",
      "    year={2023},\n",
      "    booktitle={ArXiv preprint arXiv:2303.04673},\n",
      "}\n",
      "```\n",
      "\n",
      "- [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2306.01337 (2023).\n",
      "\n",
      "```bibtex\n",
      "@inproceedings{wu2023empirical,\n",
      "    title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n",
      "    author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n",
      "    year={2023},\n",
      "    booktitle={ArXiv preprint arXiv:2306.01337},\n",
      "}\n",
      "```\n",
      "# Integrate - Spark\n",
      "\n",
      "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n",
      "\n",
      "- Use Spark ML estimators for AutoML.\n",
      "- Use Spark to run training in parallel spark jobs.\n",
      "\n",
      "## Spark ML Estimators\n",
      "\n",
      "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n",
      "\n",
      "### Data\n",
      "\n",
      "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n",
      "\n",
      "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n",
      "\n",
      "This function also accepts optional arguments `index_col` and `default_index_type`.\n",
      "\n",
      "- `index_col` is the column name to use as the index, default is None.\n",
      "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n",
      "\n",
      "Here is an example code snippet for Spark Data:\n",
      "\n",
      "```python\n",
      "import pandas as pd\n",
      "from flaml.automl.spark.utils import to_pandas_on_spark\n",
      "\n",
      "# Creating a dictionary\n",
      "data = {\n",
      "    \"Square_Feet\": [800, 1200, 1800, 1500, 850],\n",
      "    \"Age_Years\": [20, 15, 10, 7, 25],\n",
      "    \"Price\": [100000, 200000, 300000, 240000, 120000],\n",
      "}\n",
      "\n",
      "# Creating a pandas DataFrame\n",
      "dataframe = pd.DataFrame(data)\n",
      "label = \"Price\"\n",
      "\n",
      "# Convert to pandas-on-spark dataframe\n",
      "psdf = to_pandas_on_spark(dataframe)\n",
      "```\n",
      "\n",
      "To use Spark ML models you need to format your data appropriately. Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n",
      "\n",
      "Here is an example of how to use it:\n",
      "\n",
      "```python\n",
      "from pyspark.ml.feature import VectorAssembler\n",
      "\n",
      "columns = psdf.columns\n",
      "feature_cols = [col for col in columns if col != label]\n",
      "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n",
      "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n",
      "```\n",
      "\n",
      "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n",
      "\n",
      "### Estimators\n",
      "\n",
      "#### Model List\n",
      "\n",
      "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n",
      "\n",
      "#### Usage\n",
      "\n",
      "First, prepare your data in the required format as described in the previous section.\n",
      "\n",
      "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n",
      "\n",
      "Here is an example code snippet using SparkML models in AutoML:\n",
      "\n",
      "```python\n",
      "import flaml\n",
      "\n",
      "# prepare your data in pandas-on-spark format as we previously mentioned\n",
      "\n",
      "automl = flaml.AutoML()\n",
      "settings = {\n",
      "    \"time_budget\": 30,\n",
      "    \"metric\": \"r2\",\n",
      "    \"estimator_list\": [\"lgbm_spark\"],  # this setting is optional\n",
      "    \"task\": \"regression\",\n",
      "}\n",
      "\n",
      "automl.fit(\n",
      "    dataframe=psdf,\n",
      "    label=label,\n",
      "    **settings,\n",
      ")\n",
      "```\n",
      "\n",
      "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n",
      "\n",
      "## Parallel Spark Jobs\n",
      "\n",
      "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n",
      "\n",
      "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n",
      "\n",
      "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n",
      "\n",
      "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n",
      "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performes parallel tuning.\n",
      "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n",
      "\n",
      "An example code snippet for using parallel Spark jobs:\n",
      "\n",
      "```python\n",
      "import flaml\n",
      "\n",
      "automl_experiment = flaml.AutoML()\n",
      "automl_settings = {\n",
      "    \"time_budget\": 30,\n",
      "    \"metric\": \"r2\",\n",
      "    \"task\": \"regression\",\n",
      "    \"n_concurrent_trials\": 2,\n",
      "    \"use_spark\": True,\n",
      "    \"force_cancel\": True,  # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n",
      "}\n",
      "\n",
      "automl.fit(\n",
      "    dataframe=dataframe,\n",
      "    label=label,\n",
      "    **automl_settings,\n",
      ")\n",
      "```\n",
      "\n",
      "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n",
      "\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
      "\n",
      "The authors of FLAML are Chi Wang, Qingyun Wu, Markus Weimer, and Erkang Zhu.\n",
      "\n",
      "--------------------------------------------------------------------------------\n"
@@ -1436,6 +1135,13 @@
    "qa_problem = \"Who is the author of FLAML?\"\n",
    "chat_result = ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem)"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
  }
 ],
 "metadata": {
@@ -1460,7 +1166,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.13"
+  "version": "3.11.9"
  },
 "skip_test": "Requires interactive usage"
},
setup.py (17 changed lines)
@@ -1,4 +1,5 @@
 import os
+import platform

 import setuptools

@@ -13,6 +14,9 @@ with open(os.path.join(here, "autogen/version.py")) as fp:
     exec(fp.read(), version)
     __version__ = version["__version__"]

+
+current_os = platform.system()
+
 install_requires = [
     "openai>=1.3",
     "diskcache",
@@ -46,6 +50,13 @@ retrieve_chat = [
     "markdownify",
 ]

+retrieve_chat_pgvector = [*retrieve_chat, "pgvector>=0.2.5"]
+
+if current_os in ["Windows", "Darwin"]:
+    retrieve_chat_pgvector.extend(["psycopg[binary]>=3.1.18"])
+elif current_os == "Linux":
+    retrieve_chat_pgvector.extend(["psycopg>=3.1.18"])
+
 extra_require = {
     "test": [
         "ipykernel",
@@ -60,11 +71,7 @@ extra_require = {
     "blendsearch": ["flaml[blendsearch]"],
     "mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"],
     "retrievechat": retrieve_chat,
-    "retrievechat-pgvector": [
-        *retrieve_chat,
-        "pgvector>=0.2.5",
-        "psycopg>=3.1.18",
-    ],
+    "retrievechat-pgvector": retrieve_chat_pgvector,
     "retrievechat-qdrant": [
         *retrieve_chat,
         "qdrant_client[fastembed]",
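The extra's name is unchanged, so installation stays the same; a hedged note, assuming `pyautogen` (the PyPI name this repository published under at the time):

```python
# In a notebook or shell cell:
#   %pip install "pyautogen[retrievechat-pgvector]"
#
# The platform check above runs at install/build time, so Windows and macOS
# resolve to psycopg[binary] (bundled libpq), while Linux gets plain psycopg.
```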
@@ -1,5 +1,6 @@
 import os
 import sys
+import urllib.parse

 import pytest
 from conftest import reason
@@ -8,6 +9,7 @@ sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

 try:
     import pgvector
+    import psycopg
     import sentence_transformers

     from autogen.agentchat.contrib.vectordb.pgvectordb import PGVectorDB
@@ -24,12 +26,52 @@ reason = "do not run on MacOS or windows OR dependency is not installed OR " + reason
     reason=reason,
 )
 def test_pgvector():
-    # test create collection
+    # test db config
     db_config = {
         "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres",
     }

-    db = PGVectorDB(connection_string=db_config["connection_string"])
+    # test create collection with connection_string authentication
+    db = PGVectorDB(
+        connection_string=db_config["connection_string"],
+    )
     collection_name = "test_collection"
     collection = db.create_collection(collection_name=collection_name, overwrite=True, get_or_create=True)
     assert collection.name == collection_name
+
+    # test create collection with conn object authentication
+    parsed_connection = urllib.parse.urlparse(db_config["connection_string"])
+    encoded_username = urllib.parse.quote(parsed_connection.username, safe="")
+    encoded_password = urllib.parse.quote(parsed_connection.password, safe="")
+    encoded_host = urllib.parse.quote(parsed_connection.hostname, safe="")
+    encoded_database = urllib.parse.quote(parsed_connection.path[1:], safe="")
+    connection_string_encoded = (
+        f"{parsed_connection.scheme}://{encoded_username}:{encoded_password}"
+        f"@{encoded_host}:{parsed_connection.port}/{encoded_database}"
+    )
+    conn = psycopg.connect(conninfo=connection_string_encoded, autocommit=True)
+
+    db = PGVectorDB(conn=conn)
+    collection_name = "test_collection"
+    collection = db.create_collection(collection_name=collection_name, overwrite=True, get_or_create=True)
+    assert collection.name == collection_name
+
+    # test create collection with basic authentication
+    db_config = {
+        "username": "postgres",
+        "password": os.environ.get("POSTGRES_PASSWORD", default="postgres"),
+        "host": "localhost",
+        "port": 5432,
+        "dbname": "postgres",
+    }
+
+    db = PGVectorDB(
+        username=db_config["username"],
+        password=db_config["password"],
+        port=db_config["port"],
+        host=db_config["host"],
+        dbname=db_config["dbname"],
+    )
+    collection_name = "test_collection"
+    collection = db.create_collection(collection_name=collection_name, overwrite=True, get_or_create=True)
+    assert collection.name == collection_name