{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "tLIs1YRdr8jM",
    "outputId": "909c1c70-1a22-4e9d-b7f4-a40e2d737fb0"
   },
   "outputs": [],
   "source": [
    "!pip install \"autogen-agentchat~=0.2\"\n",
    "!pip install chromadb\n",
    "!pip install sentence_transformers\n",
    "!pip install tiktoken\n",
    "!pip install pypdf"
   ]
  },
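  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note (added for clarity): only `autogen-agentchat` is strictly required for this async-human-input demo; `chromadb`, `sentence_transformers`, `tiktoken`, and `pypdf` back the optional `RetrieveUserProxyAgent` imported in the next cell."
   ]
  },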
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "yU8zQPetsW28"
   },
   "outputs": [],
   "source": [
    "import asyncio\n",
    "from typing import Dict, Optional, Union\n",
    "\n",
    "import nest_asyncio\n",
    "\n",
    "from autogen import AssistantAgent\n",
    "\n",
    "# Optional; unused in this demo, but backed by the chromadb/sentence_transformers/pypdf installs above\n",
    "from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n",
    "from autogen.agentchat.user_proxy_agent import UserProxyAgent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "R2ITRFQisgzI"
   },
   "outputs": [],
   "source": [
    "# Define an asynchronous function that simulates some asynchronous task (e.g., an I/O operation)\n",
    "\n",
    "\n",
    "async def my_asynchronous_function():\n",
    "    print(\"Start asynchronous function\")\n",
    "    await asyncio.sleep(2)  # Simulate some asynchronous task (e.g., an I/O operation)\n",
    "    print(\"End asynchronous function\")\n",
    "    return \"input\"\n",
    "\n",
    "\n",
    "# Define a custom class `CustomisedUserProxyAgent` that extends `UserProxyAgent`\n",
    "\n",
    "\n",
    "class CustomisedUserProxyAgent(UserProxyAgent):\n",
    "    # Asynchronous method to get human input\n",
    "    async def a_get_human_input(self, prompt: str) -> str:\n",
    "        # Await the simulated asynchronous task instead of blocking on real user input\n",
    "        user_input = await my_asynchronous_function()\n",
    "\n",
    "        return user_input\n",
    "\n",
    "    # Asynchronous method to receive a message\n",
    "    async def a_receive(\n",
    "        self,\n",
    "        message: Union[Dict, str],\n",
    "        sender,\n",
    "        request_reply: Optional[bool] = None,\n",
    "        silent: Optional[bool] = False,\n",
    "    ):\n",
    "        # Delegate to the superclass to handle message reception asynchronously\n",
    "        await super().a_receive(message, sender, request_reply, silent)\n",
    "\n",
    "\n",
    "class CustomisedAssistantAgent(AssistantAgent):\n",
    "    # Asynchronous method to get human input\n",
    "    async def a_get_human_input(self, prompt: str) -> str:\n",
    "        # Await the simulated asynchronous task instead of blocking on real user input\n",
    "        user_input = await my_asynchronous_function()\n",
    "\n",
    "        return user_input\n",
    "\n",
    "    # Asynchronous method to receive a message\n",
    "    async def a_receive(\n",
    "        self,\n",
    "        message: Union[Dict, str],\n",
    "        sender,\n",
    "        request_reply: Optional[bool] = None,\n",
    "        silent: Optional[bool] = False,\n",
    "    ):\n",
    "        # Delegate to the superclass to handle message reception asynchronously\n",
    "        await super().a_receive(message, sender, request_reply, silent)"
   ]
  },
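  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sanity check of the override (a sketch added for illustration, not part of the original sample): calling `a_get_human_input` directly should print the simulated start/end messages, pause about 2 seconds, and return the placeholder string `\"input\"`. The bare `await` assumes a notebook environment with a running event loop, and the agent name `checker` is made up for this check."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical quick check: exercise the overridden async human-input hook directly.\n",
    "checker = CustomisedUserProxyAgent(\n",
    "    name=\"checker\",  # made-up name, used only for this check\n",
    "    human_input_mode=\"ALWAYS\",\n",
    "    code_execution_config=False,\n",
    ")\n",
    "reply = await checker.a_get_human_input(\"(prompt is ignored by the simulated input)\")  # noqa: F704\n",
    "print(reply)  # expected: 'input'"
   ]
  },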
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "2v_DhqBFuA1O"
   },
   "outputs": [],
   "source": [
    "def create_llm_config(model, temperature, seed):\n",
    "    config_list = [\n",
    "        {\n",
    "            \"model\": model,\n",
    "            \"api_key\": \"<api_key>\",  # replace with your own key\n",
    "        },\n",
    "    ]\n",
    "\n",
    "    llm_config = {\n",
    "        \"seed\": int(seed),\n",
    "        \"config_list\": config_list,\n",
    "        \"temperature\": float(temperature),\n",
    "    }\n",
    "\n",
    "    return llm_config"
   ]
  },
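  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small usage sketch (added for illustration): `seed` and `temperature` are coerced with `int()`/`float()`, so strings and numbers both work. Building the dict makes no API call, but the `<api_key>` placeholder must be replaced before the chat below will run. The variable name `example_config` is made up here."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: inspect the config this factory produces.\n",
    "example_config = create_llm_config(\"gpt-4\", \"0.4\", \"23\")\n",
    "print(example_config[\"seed\"], example_config[\"temperature\"])  # 23 0.4"
   ]
  },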
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "yHIUO2CiwfIp",
    "outputId": "79ceccca-f91c-4dde-9e21-1f2129cdc1a8"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.10/dist-packages (1.5.8)\n"
     ]
    }
   ],
   "source": [
    "!pip install nest-asyncio"
   ]
  },
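  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Jupyter already runs an asyncio event loop, and `nest_asyncio.apply()` patches it to tolerate re-entrant use; that is why the next cell can combine the library's internal event-loop usage with a top-level `await main()`."
   ]
  },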
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ywFsRNMDteaH",
    "outputId": "5716adad-de80-4595-eec9-523b1f1cd313"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "boss (to assistant):\n",
      "\n",
      "Resume Review, Technical Skills Assessment, Project Discussion, Job Role Expectations, Closing Remarks.\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "assistant (to boss):\n",
      "\n",
      "1. Can you walk me through your resume, highlighting the most significant parts?\n",
      "2. What technical skills do you possess that make you a strong candidate for this position?\n",
      "3. Can you discuss a project you've worked on that you believe showcases your abilities?\n",
      "4. What are your expectations for this job role?\n",
      "5. Do you have any questions or concerns before we conclude this interview?\n",
      "\n",
      "Can you please provide your feedback on these questions?\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "Provide feedback to assistant. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n"
     ]
    }
   ],
   "source": [
    "nest_asyncio.apply()\n",
    "\n",
    "\n",
    "async def main():\n",
    "    boss = CustomisedUserProxyAgent(\n",
    "        name=\"boss\",\n",
    "        human_input_mode=\"ALWAYS\",\n",
    "        max_consecutive_auto_reply=0,\n",
    "        code_execution_config=False,\n",
    "    )\n",
    "\n",
    "    assistant = CustomisedAssistantAgent(\n",
    "        name=\"assistant\",\n",
    "        system_message=\"You will be given an agenda, and you will create questions for an interview meeting. Every time you generate questions, ask the user for feedback. If the user provides feedback, incorporate it and generate a new set of questions; if the user does not want any updates, terminate the process and exit.\",\n",
    "        llm_config=create_llm_config(\"gpt-4\", \"0.4\", \"23\"),\n",
    "    )\n",
    "\n",
    "    await boss.a_initiate_chat(\n",
    "        assistant,\n",
    "        message=\"Resume Review, Technical Skills Assessment, Project Discussion, Job Role Expectations, Closing Remarks.\",\n",
    "        n_results=3,\n",
    "    )\n",
    "\n",
    "\n",
    "await main()  # noqa: F704"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": [],
   "toc_visible": true
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}