mirror of https://github.com/microsoft/autogen.git
Update usage of built-in LLMs (#3474)
This commit is contained in:
parent
e579a4609d
commit
40cfe07a95
@@ -19,18 +19,16 @@
 "[Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/get-started/microsoft-fabric-overview) is an all-in-one analytics solution for enterprises that covers everything from data movement to data science, Real-Time Analytics, and business intelligence. It offers a comprehensive suite of services, including data lake, data engineering, and data integration, all in one place. Its pre-built AI models include GPT-x models such as `gpt-4o`, `gpt-4-turbo`, `gpt-4`, `gpt-4-8k`, `gpt-4-32k`, `gpt-35-turbo`, `gpt-35-turbo-16k` and `gpt-35-turbo-instruct`, etc. It's important to note that the Azure OpenAI service is not supported on trial SKUs and only paid SKUs (F64 or higher, or P1 or higher) are supported.\n",
 "\n",
 "In this notebook, we demonstrate several examples:\n",
+"- 0. How to access pre-built LLM endpoints with AutoGen in Microsoft Fabric.\n",
 "- 1. How to use `AssistantAgent` and `UserProxyAgent` to write code and execute the code.\n",
 "- 2. How to use `AssistantAgent` and `RetrieveUserProxyAgent` to do Retrieval Augmented Generation (RAG) for QA and Code Generation.\n",
 "- 3. How to use `MultimodalConversableAgent` to chat with images.\n",
 "\n",
-"### Requirements\n",
+"#### Requirements\n",
 "\n",
-"AutoGen requires `Python>=3.8`. To run this notebook example, please install:\n",
-"```bash\n",
-"pip install \"pyautogen[retrievechat,lmm]>=0.2.28\"\n",
-"```\n",
+"AutoGen requires `Python>=3.8`.\n",
 "\n",
-"Also, this notebook depends on Microsoft Fabric pre-built LLM endpoints. Running it elsewhere may encounter errors."
+"Also, this notebook depends on Microsoft Fabric pre-built LLM endpoints and Fabric runtime 1.2+. Running it elsewhere may encounter errors."
 ]
 },
 {
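For orientation, the Example 1 flow listed above pairs an `AssistantAgent` (which writes code) with a `UserProxyAgent` (which executes it). A minimal sketch of that pattern with the pyautogen 0.2 API; the agent names, task prompt, and simplified `llm_config` are illustrative rather than the notebook's exact cells:

```python
import autogen

# Illustrative config; the notebook builds its own config_list for Fabric.
llm_config = {"config_list": [{"model": "gpt-4o"}], "temperature": 0}

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=3,
    # Executes the assistant's code blocks locally in ./coding
    code_execution_config={"work_dir": "coding", "use_docker": False},
)
user_proxy.initiate_chat(assistant, message="What day of the week is it today? Use Python to find out.")
```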
@@ -44,22 +42,12 @@
 }
 },
 "source": [
-"### Install AutoGen"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "2",
-"metadata": {},
-"outputs": [],
-"source": [
-"%pip install \"pyautogen[retrievechat,lmm]>=0.2.28\" -q"
+"### Example 0"
 ]
 },
 {
 "cell_type": "markdown",
-"id": "3",
+"id": "2",
 "metadata": {
 "nteract": {
 "transient": {
@@ -68,7 +56,32 @@
 }
 },
 "source": [
-"### Set up config_list and llm_config"
+"#### Work with openai<1\n",
+"\n",
+"AutoGen can work with openai<1 in Microsoft Fabric. To access pre-built LLM endpoints with AutoGen, you can follow the example below.\n",
+"\n",
+"This example can run in Fabric runtime 1.2+."
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "3",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"# pyautogen<=0.1.14 supports openai<1\n",
+"%pip install \"pyautogen==0.1.14\" \"openai==0.28.1\" -q"
+]
+},
 {
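The cells added above pin `pyautogen==0.1.14` for the openai<1 path; the next hunk adds the matching configuration and a smoke-test chat. Condensed into one runnable sketch, grounded in those added cells (with openai<1 the Fabric runtime injects credentials, so `config_list` only names a pre-built model):

```python
import autogen

# openai<1 path: no api_key/base_url needed for Fabric's pre-built endpoints.
config_list = [{"model": "gpt-4o"}]
llm_config = {"config_list": config_list, "temperature": 0, "timeout": 600}

agent = autogen.agentchat.ConversableAgent(
    name=config_list[0]["model"],
    llm_config=llm_config,
    max_consecutive_auto_reply=1,
    human_input_mode="NEVER",
)
userproxy = autogen.agentchat.ConversableAgent(
    name="user",
    llm_config=False,
    max_consecutive_auto_reply=0,
    default_auto_reply="TERMINATE",
    human_input_mode="NEVER",
)
userproxy.initiate_chat(recipient=agent, message="Tell me a quick joke.")
```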
@@ -86,26 +99,172 @@
 }
 }
 },
+"outputs": [],
+"source": [
+"config_list = [\n",
+"    {\n",
+"        \"model\": \"gpt-4o\",\n",
+"    },\n",
+"]\n",
+"\n",
+"# Set temperature, timeout and other LLM configurations\n",
+"llm_config = {\n",
+"    \"config_list\": config_list,\n",
+"    \"temperature\": 0,\n",
+"    \"timeout\": 600,\n",
+"}"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "5",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
 "outputs": [
 {
 "data": {
 "application/vnd.livy.statement-meta+json": {
-"execution_finish_time": "2024-06-07T15:24:20.5752101Z",
-"execution_start_time": "2024-06-07T15:24:03.7868628Z",
+"execution_finish_time": "2024-09-04T11:10:49.8449248Z",
+"execution_start_time": "2024-09-04T11:09:05.6935167Z",
 "livy_statement_state": "available",
-"parent_msg_id": "bf8925aa-a2a2-4686-9388-3ec1eb12c5d7",
-"queued_time": "2024-06-07T15:23:08.5880731Z",
-"session_id": "1d5e9aec-2019-408c-a19a-5db9fb175ae2",
+"normalized_state": "finished",
+"parent_msg_id": "5dacfdd5-af6c-4acd-9ece-60f9e0758174",
+"queued_time": "2024-09-04T11:08:29.1314923Z",
+"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
 "session_start_time": null,
 "spark_pool": null,
 "state": "finished",
-"statement_id": 9,
+"statement_id": 11,
 "statement_ids": [
-9
+11
 ]
 },
 "text/plain": [
-"StatementMeta(, 1d5e9aec-2019-408c-a19a-5db9fb175ae2, 9, Finished, Available)"
+"StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 11, Finished, Available, Finished)"
 ]
 },
 "metadata": {},
 "output_type": "display_data"
 },
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"2024-09-04 11:10:05.592463: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
+"2024-09-04 11:10:11.425719: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+"To enable the following instructions: SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
+]
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"\u001b[33muser\u001b[0m (to gpt-4o):\n",
+"\n",
+"Tell me a quick joke.\n",
+"\n",
+"--------------------------------------------------------------------------------\n",
+"\u001b[33mgpt-4o\u001b[0m (to user):\n",
+"\n",
+"Why don't scientists trust atoms?\n",
+"\n",
+"Because they make up everything!\n",
+"\n",
+"--------------------------------------------------------------------------------\n"
+]
+}
+],
+"source": [
+"import autogen\n",
+"\n",
+"agent = autogen.agentchat.ConversableAgent(\n",
+"    name=config_list[0][\"model\"], llm_config=llm_config, max_consecutive_auto_reply=1, human_input_mode=\"NEVER\"\n",
+")\n",
+"userproxy = autogen.agentchat.ConversableAgent(\n",
+"    name=\"user\",\n",
+"    max_consecutive_auto_reply=0,\n",
+"    llm_config=False,\n",
+"    default_auto_reply=\"TERMINATE\",\n",
+"    human_input_mode=\"NEVER\",\n",
+")\n",
+"userproxy.initiate_chat(recipient=agent, message=\"Tell me a quick joke.\")"
+]
+},
+{
+"cell_type": "markdown",
+"id": "6",
+"metadata": {
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"source": [
+"#### Work with openai>=1\n",
+"\n",
+"AutoGen can work with openai>=1 in Microsoft Fabric. To access pre-built LLM endpoints with AutoGen, you can follow the example below.\n",
+"\n",
+"This example and the examples below can only run in Fabric runtime 1.3+."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "7",
+"metadata": {},
+"outputs": [],
+"source": [
+"# pyautogen>0.1.14 supports openai>=1\n",
+"%pip install \"pyautogen>0.2\" \"openai>1\" -q"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "8",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [
+{
+"data": {
+"application/vnd.livy.statement-meta+json": {
+"execution_finish_time": "2024-09-04T11:21:01.4134396Z",
+"execution_start_time": "2024-09-04T11:21:01.0206532Z",
+"livy_statement_state": "available",
+"normalized_state": "finished",
+"parent_msg_id": "b01282de-e79f-4a3e-9205-73fa8ab87599",
+"queued_time": "2024-09-04T11:21:00.5084Z",
+"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
+"session_start_time": null,
+"spark_pool": null,
+"state": "finished",
+"statement_id": 38,
+"statement_ids": [
+38
+]
+},
+"text/plain": [
+"StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 38, Finished, Available, Finished)"
+]
+},
+"metadata": {},
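The two paths above require incompatible pins, so it helps to check which one the current session can use. A hedged sketch; it assumes the `packaging` package is available in the runtime:

```python
import openai
from packaging.version import Version

# Pick the install line matching the session's openai version.
if Version(openai.__version__) < Version("1.0.0"):
    print('openai<1: %pip install "pyautogen==0.1.14" "openai==0.28.1" -q  (Fabric runtime 1.2+)')
else:
    print('openai>=1: %pip install "pyautogen>0.2" "openai>1" -q  (Fabric runtime 1.3+)')
```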
@@ -113,42 +272,123 @@
 }
 ],
 "source": [
-"from synapse.ml.mlflow import get_mlflow_env_config\n",
+"import types\n",
 "\n",
+"import httpx\n",
+"from synapse.ml.fabric.credentials import get_openai_httpx_sync_client\n",
 "\n",
-"def get_config_list():\n",
-"    mlflow_env_configs = get_mlflow_env_config()\n",
-"    access_token = mlflow_env_configs.driver_aad_token\n",
-"    prebuilt_AI_base_url = mlflow_env_configs.workload_endpoint + \"cognitive/openai/\"\n",
+"import autogen\n",
 "\n",
-"    config_list = [\n",
-"        {\n",
-"            \"model\": \"gpt-4o\",\n",
-"            \"api_key\": access_token,\n",
-"            \"base_url\": prebuilt_AI_base_url,\n",
-"            \"api_type\": \"azure\",\n",
-"            \"api_version\": \"2024-02-01\",\n",
-"        },\n",
-"    ]\n",
+"http_client = get_openai_httpx_sync_client()  # http_client is needed for openai>1\n",
+"http_client.__deepcopy__ = types.MethodType(\n",
+"    lambda self, memo: self, http_client\n",
+")  # https://microsoft.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n",
 "\n",
-"    # Set temperature, timeout and other LLM configurations\n",
-"    llm_config = {\n",
-"        \"config_list\": config_list,\n",
-"        \"temperature\": 0,\n",
-"        \"timeout\": 600,\n",
-"    }\n",
-"    return config_list, llm_config\n",
+"config_list = [\n",
+"    {\n",
+"        \"model\": \"gpt-4o\",\n",
+"        \"http_client\": http_client,\n",
+"        \"api_version\": \"2024-02-01\",\n",
+"        \"api_type\": \"azure\",\n",
+"    },\n",
+"]\n",
 "\n",
+"# Set temperature, timeout and other LLM configurations\n",
+"llm_config = {\n",
+"    \"config_list\": config_list,\n",
+"    \"temperature\": 0,\n",
+"}"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "9",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [
+{
+"data": {
+"application/vnd.livy.statement-meta+json": {
+"execution_finish_time": "2024-09-04T11:21:05.2491512Z",
+"execution_start_time": "2024-09-04T11:21:03.4015759Z",
+"livy_statement_state": "available",
+"normalized_state": "finished",
+"parent_msg_id": "4f4e94fa-4626-445d-86d0-b8fa3e3ae0b5",
+"queued_time": "2024-09-04T11:21:02.9666054Z",
+"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
+"session_start_time": null,
+"spark_pool": null,
+"state": "finished",
+"statement_id": 39,
+"statement_ids": [
+39
+]
+},
+"text/plain": [
+"StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 39, Finished, Available, Finished)"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"\u001b[33muser\u001b[0m (to gpt-4o):\n",
+"\n",
+"Tell me a joke about openai.\n",
+"\n",
+"--------------------------------------------------------------------------------\n",
+"\u001b[33mgpt-4o\u001b[0m (to user):\n",
+"\n",
+"Why did the AI go to school?\n",
+"\n",
+"Because it wanted to improve its \"neural\" network!\n",
+"\n",
+"--------------------------------------------------------------------------------\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"ChatResult(chat_id=None, chat_history=[{'content': 'Tell me a joke about openai.', 'role': 'assistant', 'name': 'user'}, {'content': 'Why did the AI go to school?\\n\\nBecause it wanted to improve its \"neural\" network!', 'role': 'user', 'name': 'gpt-4o'}], summary='Why did the AI go to school?\\n\\nBecause it wanted to improve its \"neural\" network!', cost={'usage_including_cached_inference': {'total_cost': 0.000435, 'gpt-4o-2024-05-13': {'cost': 0.000435, 'prompt_tokens': 27, 'completion_tokens': 20, 'total_tokens': 47}}, 'usage_excluding_cached_inference': {'total_cost': 0.000435, 'gpt-4o-2024-05-13': {'cost': 0.000435, 'prompt_tokens': 27, 'completion_tokens': 20, 'total_tokens': 47}}}, human_input=[])"
+]
+},
+"execution_count": null,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
-"import autogen\n",
 "\n",
-"config_list, llm_config = get_config_list()\n",
 "\n",
-"assert len(config_list) > 0\n",
-"print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])"
+"agent = autogen.agentchat.ConversableAgent(\n",
+"    name=config_list[0][\"model\"], llm_config=llm_config, max_consecutive_auto_reply=1, human_input_mode=\"NEVER\"\n",
+")\n",
+"userproxy = autogen.agentchat.ConversableAgent(\n",
+"    name=\"user\",\n",
+"    max_consecutive_auto_reply=0,\n",
+"    llm_config=False,\n",
+"    default_auto_reply=\"TERMINATE\",\n",
+"    human_input_mode=\"NEVER\",\n",
+")\n",
+"userproxy.initiate_chat(recipient=agent, message=\"Tell me a joke about openai.\")"
+]
+},
+{
 "cell_type": "markdown",
-"id": "5",
+"id": "10",
 "metadata": {
 "nteract": {
 "transient": {
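The `__deepcopy__` patch in the hunk above exists because AutoGen deep-copies `llm_config` internally and `httpx.Client` instances are not deep-copyable (see the linked llm_configuration topic). A standalone sketch of the same trick, with a plain `httpx.Client` standing in for Fabric's `get_openai_httpx_sync_client()`:

```python
import copy
import types

import httpx

# Stand-in for get_openai_httpx_sync_client(); only the patch matters here.
http_client = httpx.Client()

# Make deepcopy(...) hand back the very same client instead of failing.
http_client.__deepcopy__ = types.MethodType(lambda self, memo: self, http_client)

assert copy.deepcopy(http_client) is http_client
```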
@@ -164,7 +404,71 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "6",
+"id": "11",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"%pip install \"pyautogen[retrievechat,lmm]>=0.2.28\" -q"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "12",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"import types\n",
+"\n",
+"from synapse.ml.fabric.credentials import get_openai_httpx_sync_client\n",
+"\n",
+"import autogen\n",
+"\n",
+"http_client = get_openai_httpx_sync_client()  # http_client is needed for openai>1\n",
+"http_client.__deepcopy__ = types.MethodType(\n",
+"    lambda self, memo: self, http_client\n",
+")  # https://microsoft.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n",
+"\n",
+"config_list = [\n",
+"    {\n",
+"        \"model\": \"gpt-4o\",\n",
+"        \"http_client\": http_client,\n",
+"        \"api_version\": \"2024-02-01\",\n",
+"        \"api_type\": \"azure\",\n",
+"    },\n",
+"]\n",
+"\n",
+"# Set temperature, timeout and other LLM configurations\n",
+"llm_config = {\n",
+"    \"config_list\": config_list,\n",
+"    \"temperature\": 0,\n",
+"}"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "13",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
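The `retrievechat` extra installed by the new cell above powers Example 2 (RAG). A hedged sketch of that pattern with the 0.2-era API; the docs URL and question are illustrative, and `llm_config` is the one built in the previous cell:

```python
import autogen
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)

# The retrieval proxy chunks and embeds the docs, then injects relevant context.
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    retrieve_config={
        "task": "qa",
        "docs_path": "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md",
    },
)
ragproxyagent.initiate_chat(
    assistant,
    message=ragproxyagent.message_generator,
    problem="Which API should I use if I want to use Spark?",
)
```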
@@ -340,7 +644,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "7",
+"id": "14",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -392,7 +696,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "8",
+"id": "15",
 "metadata": {
 "nteract": {
 "transient": {
@@ -410,7 +714,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "9",
+"id": "16",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -466,7 +770,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "10",
+"id": "17",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -511,7 +815,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "11",
+"id": "18",
 "metadata": {
 "nteract": {
 "transient": {
@@ -564,7 +868,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "12",
+"id": "19",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -909,7 +1213,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "13",
+"id": "20",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -961,7 +1265,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "14",
+"id": "21",
 "metadata": {
 "nteract": {
 "transient": {
@@ -1029,7 +1333,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "15",
+"id": "22",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -1760,7 +2064,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "16",
+"id": "23",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -1812,7 +2116,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "17",
+"id": "24",
 "metadata": {
 "nteract": {
 "transient": {
@@ -1827,7 +2131,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "18",
+"id": "25",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -2913,7 +3217,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "19",
+"id": "26",
 "metadata": {
 "nteract": {
 "transient": {
@@ -2930,7 +3234,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "20",
+"id": "27",
 "metadata": {
 "nteract": {
 "transient": {
@@ -2945,7 +3249,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "21",
+"id": "28",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
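The renumbered cells in this region belong to Example 3, chatting with images via the `lmm` extra. A hedged sketch of the `MultimodalConversableAgent` pattern; the image URL and prompt are illustrative, and `llm_config` is reused from earlier:

```python
import autogen
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent

image_agent = MultimodalConversableAgent(
    name="image-explainer", llm_config=llm_config, max_consecutive_auto_reply=1
)
user_proxy = autogen.UserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=0,
    code_execution_config=False,
)
# Images are referenced inline with an <img URL> tag in the message.
user_proxy.initiate_chat(
    image_agent,
    message="What's shown in this image? <img https://raw.githubusercontent.com/microsoft/autogen/main/website/static/img/autogen_agentchat.png>",
)
```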
@@ -3032,7 +3336,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "22",
+"id": "29",
 "metadata": {
 "jupyter": {
 "outputs_hidden": false,
@@ -3088,7 +3392,8 @@
 "name": "synapse_pyspark"
 },
 "kernelspec": {
-"display_name": "synapse_pyspark",
+"display_name": "Synapse PySpark",
+"language": "Python",
 "name": "synapse_pyspark"
 },
 "language_info": {